diff --git a/.github/workflows/monthly-tag.yml b/.github/workflows/monthly-tag.yml new file mode 100644 index 0000000000000..8736a341cecf9 --- /dev/null +++ b/.github/workflows/monthly-tag.yml @@ -0,0 +1,43 @@ +name: Monthly Snapshot Tag + +on: + schedule: + - cron: "0 1 1 * *" + workflow_dispatch: + +jobs: + build: + name: Take Snapshot + runs-on: ubuntu-latest + steps: + - name: Get the tags by date + id: tags + run: | + echo "::set-output name=new::$(date +'monthly-%Y-%m')" + echo "::set-output name=old::$(date -d'1 month ago' +'monthly-%Y-%m')" + - name: Checkout branch "master" + uses: actions/checkout@v2 + with: + ref: 'master' + fetch-depth: 0 + - name: Generate changelog + id: changelog + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + echo "# Automatic snapshot pre-release ${{ steps.tags.outputs.new }}" > Changelog.md + echo "" >> Changelog.md + echo "## Changes since last snapshot (${{ steps.tags.outputs.old }})" >> Changelog.md + echo "" >> Changelog.md + ./.maintain/gitlab/generate_changelog.sh ${{ steps.tags.outputs.old }} >> Changelog.md + - name: Release snapshot + id: release-snapshot + uses: actions/create-release@latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.tags.outputs.new }} + release_name: ${{ steps.tags.outputs.new }} + draft: false + prerelease: true + body_path: Changelog.md diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5cf4749eac645..9de2f79b03bca 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,7 +26,6 @@ stages: - build - publish - deploy - - flaming-fir workflow: rules: @@ -60,12 +59,15 @@ default: - kubernetes-parity-build interruptible: true +.rust-info-script: &rust-info-script + - rustup show + - cargo --version + - sccache -s + .docker-env: &docker-env image: "${CI_IMAGE}" before_script: - - rustup show - - cargo --version - - sccache -s + - *rust-info-script retry: max: 2 when: @@ -81,7 +83,6 @@ default: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE 
== "schedule" - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 @@ -92,31 +93,24 @@ default: - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 -.build-refs: &build-refs +.test-refs-no-trigger-prs-only: &test-refs-no-trigger-prs-only rules: - # .publish-refs with manual on PRs - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: manual - allow_failure: true -.publish-refs: &publish-refs +.build-refs: &build-refs rules: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_PIPELINE_SOURCE == "web" - if: $CI_PIPELINE_SOURCE == "schedule" - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 .nightly-pipeline: &nightly-pipeline @@ -124,6 +118,24 @@ default: # this job runs only on nightly pipeline with the mentioned variable, against `master` branch - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" +.merge-ref-into-master-script: &merge-ref-into-master-script + - if [ $CI_COMMIT_REF_NAME != "master" ]; then + git fetch origin +master:master; + git fetch origin +$CI_COMMIT_REF_NAME:$CI_COMMIT_REF_NAME; + git checkout master; + git config user.email "ci@gitlab.parity.io"; + git merge $CI_COMMIT_REF_NAME --verbose --no-edit; + fi + +.cargo-check-benches-script: &cargo-check-benches-script + - mkdir -p artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA + - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all + - 'cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small --json + | tee artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::node::import::native::sr25519::transfer_keep_alive::paritydb::small.json' + - 'cargo run --release -p node-bench -- ::trie::read::small --json + | tee artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json' + - sccache -s + #### stage: .pre skip-if-draft: @@ -222,12 +234,37 @@ cargo-deny: cargo-check-benches: stage: test <<: *docker-env - <<: *test-refs-no-trigger + <<: *test-refs + <<: *collect-artifacts + before_script: + # merges in the master branch on PRs + - *merge-ref-into-master-script + - *rust-info-script script: - - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all - - cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small - - cargo run --release -p node-bench -- ::trie::read::small - - sccache -s + - *cargo-check-benches-script + +node-bench-regression-guard: + # it's not belong to `build` semantically, but dag jobs can't depend on each other + # within the single stage - 
https://gitlab.com/gitlab-org/gitlab/-/issues/30632 + # more: https://github.com/paritytech/substrate/pull/8519#discussion_r608012402 + stage: build + <<: *docker-env + <<: *test-refs-no-trigger-prs-only + needs: + # this is a DAG + - job: cargo-check-benches + artifacts: true + # this does not like a DAG, just polls the artifact + - project: $CI_PROJECT_PATH + job: cargo-check-benches + ref: master + artifacts: true + variables: + CI_IMAGE: "paritytech/node-bench-regression-guard:latest" + before_script: [""] + script: + - 'node-bench-regression-guard --reference artifacts/benches/master-* + --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' cargo-check-subkey: stage: test @@ -238,6 +275,14 @@ cargo-check-subkey: - SKIP_WASM_BUILD=1 time cargo check --release - sccache -s +cargo-check-try-runtime: + stage: test + <<: *docker-env + <<: *test-refs + script: + - time cargo check --features try-runtime + - sccache -s + test-deterministic-wasm: stage: test <<: *docker-env @@ -272,7 +317,7 @@ test-linux-stable: &test-linux script: # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests - time cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml - - time cargo test -p frame-support-test --features=conditional-storage --manifest-path frame/support/test/Cargo.toml + - time cargo test -p frame-support-test --features=conditional-storage --manifest-path frame/support/test/Cargo.toml # does not reuse cache 1 min 44 sec - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - sccache -s @@ -283,7 +328,6 @@ unleash-check: - if: $CI_PIPELINE_SOURCE == "pipeline" when: never - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 script: - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} @@ -336,7 +380,7 @@ check-web-wasm: # Note: we don't need to test crates imported in `bin/node/cli` - time cargo build --manifest-path=client/consensus/aura/Cargo.toml --target=wasm32-unknown-unknown --features getrandom # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way. - - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown -Z features=itarget + - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features --features=with-tracing @@ -407,7 +451,7 @@ test-browser-node: CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER: "wasm-bindgen-test-runner" WASM_BINDGEN_TEST_TIMEOUT: 120 script: - - cargo +nightly test --target wasm32-unknown-unknown -p node-browser-testing -Z features=itarget + - cargo +nightly test --target wasm32-unknown-unknown -p node-browser-testing build-linux-substrate: &build-binary stage: build @@ -491,7 +535,7 @@ build-rust-doc: #### stage: publish .build-push-docker-image: &build-push-docker-image - <<: *publish-refs + <<: *build-refs <<: *kubernetes-build image: quay.io/buildah/stable variables: &docker-build-vars @@ -519,15 +563,16 @@ build-rust-doc: - buildah info - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" - buildah push --format=v2s2 "$IMAGE_NAME:latest" - # pass artifacts to the trigget-simnet job - - echo "VERSION=${VERSION}" > build.env - - echo "TRIGGERER=${CI_PROJECT_NAME}" >> build.env after_script: - buildah logout 
"$IMAGE_NAME" + # pass artifacts to the trigger-simnet job + - echo "IMAGE_NAME=${IMAGE_NAME}" > ./artifacts/$PRODUCT/build.env + - echo "IMAGE_TAG=${VERSION}" >> ./artifacts/$PRODUCT/build.env publish-docker-substrate: stage: publish <<: *build-push-docker-image + <<: *build-refs needs: - job: build-linux-substrate artifacts: true @@ -538,7 +583,7 @@ publish-docker-substrate: reports: # this artifact is used in trigger-simnet job # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance - dotenv: artifacts/substrate/build.env + dotenv: ./artifacts/substrate/build.env publish-docker-subkey: stage: publish @@ -552,7 +597,7 @@ publish-docker-subkey: publish-s3-release: stage: publish - <<: *publish-refs + <<: *build-refs <<: *kubernetes-build needs: - job: build-linux-substrate @@ -581,7 +626,7 @@ publish-s3-doc: artifacts: true - job: build-linux-substrate artifacts: false - <<: *publish-refs + <<: *build-refs <<: *kubernetes-build variables: GIT_STRATEGY: none @@ -651,10 +696,21 @@ deploy-prometheus-alerting-rules: trigger-simnet: stage: deploy - <<: *nightly-pipeline + image: paritytech/tools:latest + rules: + - if: $CI_PIPELINE_SOURCE == "pipeline" + when: never + - if: $CI_PIPELINE_SOURCE == "web" && $CI_COMMIT_REF_NAME == "master" + - if: $CI_COMMIT_REF_NAME == "master" needs: - job: publish-docker-substrate - trigger: - project: parity/simnet - branch: master - strategy: depend + # `build.env` brings here `$IMAGE_NAME` and `$IMAGE_TAG` (`$VERSION` here, + # i.e. `2643-0.8.29-5f689e0a-6b24dc54`). 
+ variables: + TRGR_PROJECT: ${CI_PROJECT_NAME} + TRGR_REF: ${CI_COMMIT_REF_NAME} + # simnet project ID + DWNSTRM_ID: 332 + script: + # API trigger for a simnet job + - ./scripts/gitlab/trigger_pipeline.sh diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh index c1fd7365237de..89780f082e45b 100755 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -89,7 +89,4 @@ fi diener patch --crates-to-patch ../ --substrate --path Cargo.toml # Test Polkadot pr or master branch with this Substrate commit. -time cargo test --all --release --verbose --features=real-overseer - -cd parachain/test-parachains/adder/collator/ -time cargo test --release --verbose --locked --features=real-overseer +time cargo test --all --release --verbose diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh index 4714baf54fb2f..e0412c7b7bec7 100755 --- a/.maintain/gitlab/check_polkadot_companion_status.sh +++ b/.maintain/gitlab/check_polkadot_companion_status.sh @@ -56,7 +56,7 @@ fi boldprint "companion pr: #${pr_companion}" # check the status of that pull request - needs to be -# mergable and approved +# approved and mergable curl -H "${github_header}" -sS -o companion_pr.json \ ${github_api_polkadot_pull_url}/${pr_companion} @@ -64,20 +64,6 @@ curl -H "${github_header}" -sS -o companion_pr.json \ pr_head_sha=$(jq -r -e '.head.sha' < companion_pr.json) boldprint "Polkadot PR's HEAD SHA: $pr_head_sha" -if jq -e .merged < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} already merged" - exit 0 -fi - -if jq -e '.mergeable' < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} mergeable" -else - boldprint "polkadot pr #${pr_companion} not mergeable" - exit 1 -fi - curl -H "${github_header}" -sS -o companion_pr_reviews.json \ 
${github_api_polkadot_pull_url}/${pr_companion}/reviews @@ -98,6 +84,19 @@ if [ -z "$(jq -r -e '.[].state | select(. == "APPROVED")' < companion_pr_reviews fi boldprint "polkadot pr #${pr_companion} state APPROVED" -exit 0 +if jq -e .merged < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} already merged" + exit 0 +fi + +if jq -e '.mergeable' < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} mergeable" +else + boldprint "polkadot pr #${pr_companion} not mergeable" + exit 1 +fi +exit 0 diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh index a1190f2bf0bc6..32ac1760a6117 100755 --- a/.maintain/gitlab/generate_changelog.sh +++ b/.maintain/gitlab/generate_changelog.sh @@ -32,7 +32,7 @@ $line" runtime_changes="$runtime_changes $line" fi - if has_label 'paritytech/substrate' "$pr_id" 'D1-runtime-migration'; then + if has_label 'paritytech/substrate' "$pr_id" 'E1-runtime-migration'; then migrations="$migrations $line" fi diff --git a/.maintain/gitlab/trigger_pipeline.sh b/.maintain/gitlab/trigger_pipeline.sh new file mode 100755 index 0000000000000..dd9da8102d53a --- /dev/null +++ b/.maintain/gitlab/trigger_pipeline.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +set -eu +# API trigger another project's pipeline +echo "Triggering Simnet pipeline." + +curl --silent \ + -X POST \ + -F "token=${CI_JOB_TOKEN}" \ + -F "ref=master" \ + -F "variables[TRGR_PROJECT]=${TRGR_PROJECT}" \ + -F "variables[TRGR_REF]=${TRGR_REF}" \ + -F "variables[IMAGE_NAME]=${IMAGE_NAME}" \ + -F "variables[IMAGE_TAG]=${IMAGE_TAG}" \ + "https://${CI_SERVER_HOST}/api/v4/projects/${DWNSTRM_ID}/trigger/pipeline" | \ + tee pipeline + +PIPELINE_ID=$(cat pipeline | jq ".id") +PIPELINE_URL=$(cat pipeline | jq ".web_url") +echo +echo "Simnet pipeline ${PIPELINE_URL} was successfully triggered." +echo "Now we're polling it to obtain the distinguished status." 
+ +# This is a workaround for a Gitlab bug, waits here until +# https://gitlab.com/gitlab-org/gitlab/-/issues/326137 gets fixed. +# The timeout is 360 curls with 8 sec interval, roughly an hour. + +function get_status() { + curl --silent \ + --header "PRIVATE-TOKEN: ${PIPELINE_TOKEN}" \ + "https://${CI_SERVER_HOST}/api/v4/projects/${DWNSTRM_ID}/pipelines/${PIPELINE_ID}" | \ + jq --raw-output ".status"; +} + +echo "Waiting on ${PIPELINE_ID} status..." + +for i in $(seq 1 360); do + STATUS=$(get_status); + echo "Triggered pipeline status is ${STATUS}"; + if [[ ${STATUS} =~ ^(pending|running|created)$ ]]; then + echo "${STATUS}"..."; + elif [[ ${STATUS} =~ ^(failed|canceled|skipped|manual)$ ]]; then + echo "Oh noes! Something's broken in: ${PIPELINE_URL}"; exit 1; + elif [[ ${STATUS} =~ ^(success)$ ]]; then + echo "Look how green it is: ${PIPELINE_URL}"; exit 0; + else + echo "Something else has happened in ${PIPELINE_URL}"; exit 1; + fi +sleep 8; +done diff --git a/.maintain/node-template-release.sh b/.maintain/node-template-release.sh index 1a6c245320593..fd470a3dce17a 100755 --- a/.maintain/node-template-release.sh +++ b/.maintain/node-template-release.sh @@ -10,7 +10,7 @@ if [ "$#" -ne 1 ]; then exit 1 fi -PATH_TO_ARCHIVE=$(pwd)/$1 +PATH_TO_ARCHIVE=$1 cd $PROJECT_ROOT/.maintain/node-template-release cargo run $PROJECT_ROOT/bin/node-template $PATH_TO_ARCHIVE diff --git a/.maintain/node-template-release/Cargo.toml b/.maintain/node-template-release/Cargo.toml index dd3166d58ddf4..c1d9f2da7faea 100644 --- a/.maintain/node-template-release/Cargo.toml +++ b/.maintain/node-template-release/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-release" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" diff --git a/.maintain/node-template-release/src/main.rs b/.maintain/node-template-release/src/main.rs index a1d85bf33fe33..bf37797419bcb 100644 --- a/.maintain/node-template-release/src/main.rs +++ 
b/.maintain/node-template-release/src/main.rs @@ -99,8 +99,7 @@ fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, c let deps_rewritten = dependencies .iter() .filter_map(|(k, v)| v.clone().try_into::().ok().map(move |v| (k, v))) - .filter(|t| t.1.contains_key("path")) - .filter(|t| { + .filter(|t| t.1.contains_key("path") && { // if the path does not exists, we need to add this as git dependency t.1.get("path").unwrap().as_str().map(|path| !cargo_toml_path.join(path).exists()).unwrap_or(false) }) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7a7592ed6def..d38696714ba6f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [2.17.0] +### Changed +- Updated Substrate to polkadot-v0.9.0 + ## [2.16.0] ### Changed - Updated Substrate to polkadot-v0.8.30 diff --git a/Cargo.lock b/Cargo.lock index ab10739faab99..45356b7b735cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -177,23 +177,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] -name = "asn1_der" -version = "0.6.3" +name = "arrayvec" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fce6b6a0ffdafebd82c87e79e3f40e8d2c523e5fea5566ff6b90509bf98d638" -dependencies = [ - "asn1_der_derive", -] +checksum = "5a2f58b0bb10c380af2b26e57212856b8c9a59e0925b4c20f4a174a49734eaf7" [[package]] -name = "asn1_der_derive" -version = "0.1.2" +name = "asn1_der" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" -dependencies = [ - "quote", - "syn", -] +checksum = 
"9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" [[package]] name = "assert_cmd" @@ -448,7 +441,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.23.0", + "object", "rustc-demangle", ] @@ -476,6 +469,12 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "beef" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6736e2428df2ca2848d846c43e88745121a6654696e349ce0054a420815a7409" + [[package]] name = "bincode" version = "1.3.2" @@ -488,26 +487,21 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.54.0" +version = "0.57.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66c0bb6167449588ff70803f4127f0684f9063097eca5016f37eb52b92c2cf36" +checksum = "fd4865004a46a0aafb2a0a5eb19d3c9fc46ee5f063a6cfc605c69ac9ecf5263d" dependencies = [ "bitflags", "cexpr", - "cfg-if 0.1.10", "clang-sys", - "clap", - "env_logger 0.7.1", "lazy_static", "lazycell", - "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "which 3.1.1", ] [[package]] @@ -516,16 +510,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" -[[package]] -name = "bitvec" -version = "0.17.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] - [[package]] name = "bitvec" version = "0.20.2" @@ -533,7 +517,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f682656975d3a682daff957be4ddeb65d6ad656737cd821f2d00685ae466af1" dependencies = [ "funty", - "radium 0.6.2", + "radium", "tap", "wyz", ] @@ -689,12 +673,6 @@ version = "3.6.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" -[[package]] -name = "byte-slice-cast" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" - [[package]] name = "byte-slice-cast" version = "1.0.0" @@ -742,6 +720,15 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +[[package]] +name = "camino" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4648c6d00a709aa069a236adcaae4f605a6241c72bf5bee79331a4b625921a9" +dependencies = [ + "serde", +] + [[package]] name = "cargo-platform" version = "0.1.1" @@ -753,10 +740,11 @@ dependencies = [ [[package]] name = "cargo_metadata" -version = "0.12.3" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7714a157da7991e23d90686b9524b9e12e0407a108647f52e9328f4b3d51ac7f" +checksum = "081e3f0755c1f380c2d010481b6fa2e02973586d5f2b24eebb7a2a1d98b143d8" dependencies = [ + "camino", "cargo-platform", "semver 0.11.0", "semver-parser 0.10.2", @@ -886,13 +874,13 @@ dependencies = [ [[package]] name = "clang-sys" -version = "0.29.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" +checksum = "853eda514c284c2287f4bf20ae614f8781f40a81d32ecda6e91449304dfe077c" dependencies = [ "glob", "libc", - "libloading", + "libloading 0.7.0", ] [[package]] @@ -984,18 +972,18 @@ checksum = "dcb25d077389e53838a8158c8e99174c5a9d902dee4904320db714f3c653ffba" [[package]] name = "cranelift-bforest" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4066fd63b502d73eb8c5fa6bcab9c7962b05cd580f6b149ee83a8e730d8ce7fb" +checksum = "bcee7a5107071484772b89fdf37f0f460b7db75f476e43ea7a684fd942470bcf" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a54e4beb833a3c873a18a8fe735d73d732044004c7539a072c8faa35ccb0c60" +checksum = "654ab96f0f1cab71c0d323618a58360a492da2c341eb2c1f977fc195c664001b" dependencies = [ "byteorder", "cranelift-bforest", @@ -1013,9 +1001,9 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54cac7cacb443658d8f0ff36a3545822613fa202c946c0891897843bc933810" +checksum = "65994cfc5be9d5fd10c5fc30bcdddfa50c04bb79c91329287bff846434ff8f14" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -1023,24 +1011,27 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a109760aff76788b2cdaeefad6875a73c2b450be13906524f6c2a81e05b8d83c" +checksum = "889d720b688b8b7df5e4903f9b788c3c59396050f5548e516e58ccb7312463ab" +dependencies = [ + "serde", +] [[package]] name = "cranelift-entity" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b044234aa32531f89a08b487630ddc6744696ec04c8123a1ad388de837f5de3" +checksum = "1a2e6884a363e42a9ba980193ea8603a4272f8a92bd8bbaf9f57a94dbea0ff96" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5452b3e4e97538ee5ef2cc071301c69a86c7adf2770916b9d04e9727096abd93" +checksum = "e6f41e2f9b57d2c030e249d0958f1cdc2c3cd46accf8c0438b3d1944e9153444" dependencies = [ 
"cranelift-codegen", "log", @@ -1050,25 +1041,24 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f68035c10b2e80f26cc29c32fa824380877f38483504c2a47b54e7da311caaf3" +checksum = "aab70ba7575665375d31cbdea2462916ce58be887834e1b83c860b43b51af637" dependencies = [ "cranelift-codegen", - "raw-cpuid", "target-lexicon", ] [[package]] name = "cranelift-wasm" -version = "0.69.0" +version = "0.71.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a530eb9d1c95b3309deb24c3d179d8b0ba5837ed98914a429787c395f614949d" +checksum = "f2fc3d2e70da6439adf97648dcdf81834363154f2907405345b6fbe7ca38918c" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools 0.9.0", + "itertools 0.10.0", "log", "serde", "smallvec 1.6.1", @@ -1355,6 +1345,12 @@ dependencies = [ "syn", ] +[[package]] +name = "diff" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" + [[package]] name = "difference" version = "2.0.0" @@ -1680,7 +1676,7 @@ dependencies = [ "futures-timer 3.0.2", "log", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "rand 0.8.3", ] @@ -1726,7 +1722,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" name = "fork-tree" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", ] [[package]] @@ -1748,7 +1744,7 @@ dependencies = [ "hex-literal", "linregress", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "paste 1.0.4", "serde", "sp-api", @@ -1767,7 +1763,7 @@ dependencies = [ "chrono", "frame-benchmarking", "handlebars", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-cli", "sc-client-db", "sc-executor", @@ -1787,7 +1783,7 @@ version = "3.0.0" dependencies = [ 
"frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-arithmetic", "sp-npos-elections", "sp-runtime", @@ -1804,9 +1800,9 @@ dependencies = [ "pallet-balances", "pallet-indices", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", + "sp-inherents", "sp-io", "sp-runtime", "sp-std", @@ -1818,7 +1814,7 @@ dependencies = [ name = "frame-metadata" version = "13.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-std", @@ -1835,10 +1831,10 @@ dependencies = [ "impl-trait-for-tuples", "log", "once_cell", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "paste 1.0.4", - "pretty_assertions", + "pretty_assertions 0.6.1", "serde", "smallvec 1.6.1", "sp-arithmetic", @@ -1890,8 +1886,8 @@ dependencies = [ "frame-metadata", "frame-support", "frame-system", - "parity-scale-codec 2.0.1", - "pretty_assertions", + "parity-scale-codec", + "pretty_assertions 0.6.1", "rustversion", "serde", "sp-core", @@ -1911,7 +1907,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-externalities", @@ -1929,7 +1925,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -1941,7 +1937,7 @@ dependencies = [ name = "frame-system-rpc-runtime-api" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", ] @@ -1950,7 +1946,7 @@ name = "frame-try-runtime" version = "0.9.0" dependencies = [ "frame-support", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", @@ -1964,7 +1960,7 @@ checksum = "03d47dad3685eceed8488986cad3d5027165ea5edb164331770e2059555f10a5" dependencies = [ "lazy_static", "libc", - "libloading", + "libloading 0.5.2", "winapi 0.3.9", ] @@ -2650,22 +2646,6 
@@ dependencies = [ "libc", ] -[[package]] -name = "if-watch" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b8538953a3f0d0d3868f0a706eb4273535e10d72acb5c82c1c23ae48835c85" -dependencies = [ - "async-io", - "futures 0.3.13", - "futures-lite", - "if-addrs", - "ipnet", - "libc", - "log", - "winapi 0.3.9", -] - [[package]] name = "if-watch" version = "0.2.0" @@ -2688,7 +2668,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df170efa359aebdd5cb7fe78edcc67107748e4737bdca8a8fb40d15ea7a877ed" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", ] [[package]] @@ -2821,9 +2801,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.47" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cfb73131c35423a367daf8cbd24100af0d077668c8c2943f0e7dd775fef0f65" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] @@ -2953,30 +2933,31 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.2.0-alpha" +version = "0.2.0-alpha.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "124797a4ea7430d0675db78e065e53316e3f1a3cbf0ee4d6dbdd42db7b08e193" +checksum = "2737440f37efa10e5ef7beeec43d059d29dc92640978be21fcdcef481a2edb0d" dependencies = [ "async-trait", - "futures 0.3.13", + "fnv", "hyper 0.13.10", + "hyper-rustls", "jsonrpsee-types", "jsonrpsee-utils", "log", "serde", "serde_json 1.0.64", "thiserror", - "unicase", "url 2.2.1", ] [[package]] name = "jsonrpsee-proc-macros" -version = "0.2.0-alpha.2" +version = "0.2.0-alpha.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb3f732ccbeafd15cefb59c7c7b5ac6c553c2653613b63e5e7feb7f06a219e9" +checksum = "5784ee8bb31988fa2c7a755fe31b0e21aa51894a67e5c99b6d4470f0253bf31a" dependencies = [ "Inflector", + "proc-macro-crate 1.0.0", 
"proc-macro2", "quote", "syn", @@ -2984,32 +2965,29 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.2.0-alpha.2" +version = "0.2.0-alpha.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a8cd20c190e75dc56f7543b9d5713c3186351b301b5507ea6b85d8c403aac78" +checksum = "bab3dabceeeeb865897661d532d47202eaae71cd2c606f53cb69f1fbc0555a51" dependencies = [ "async-trait", - "futures 0.3.13", + "beef", + "futures-channel", + "futures-util", "log", "serde", "serde_json 1.0.64", - "smallvec 1.6.1", "thiserror", ] [[package]] name = "jsonrpsee-utils" -version = "0.2.0-alpha" +version = "0.2.0-alpha.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0e45394ec3175a767c3c5bac584560e6ad9b56ebd73216c85ec8bab49619244" +checksum = "d63cf4d423614e71fd144a8691208539d2b23d8373e069e2fbe023c5eba5e922" dependencies = [ - "futures 0.3.13", - "globset", + "futures-util", "hyper 0.13.10", "jsonrpsee-types", - "lazy_static", - "log", - "unicase", ] [[package]] @@ -3140,81 +3118,53 @@ dependencies = [ ] [[package]] -name = "libm" -version = "0.2.1" +name = "libloading" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" +checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" +dependencies = [ + "cfg-if 1.0.0", + "winapi 0.3.9", +] [[package]] -name = "libp2p" -version = "0.35.1" +name = "libm" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" -dependencies = [ - "atomic", - "bytes 1.0.1", - "futures 0.3.13", - "lazy_static", - "libp2p-core 0.27.1", - "libp2p-deflate 0.27.1", - "libp2p-dns 0.27.0", - "libp2p-floodsub 0.27.0", - "libp2p-gossipsub 0.28.0", - "libp2p-identify 0.27.0", - "libp2p-kad 0.28.1", - "libp2p-mdns 0.28.1", - "libp2p-mplex 0.27.1", - "libp2p-noise 
0.29.0", - "libp2p-ping 0.27.0", - "libp2p-plaintext 0.27.1", - "libp2p-pnet", - "libp2p-request-response 0.9.1", - "libp2p-swarm 0.27.2", - "libp2p-swarm-derive", - "libp2p-tcp 0.27.1", - "libp2p-uds 0.27.0", - "libp2p-wasm-ext 0.27.0", - "libp2p-websocket 0.28.0", - "libp2p-yamux 0.30.1", - "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "smallvec 1.6.1", - "wasm-timer", -] +checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.36.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe5759b526f75102829c15e4d8566603b4bf502ed19b5f35920d98113873470d" +checksum = "08053fbef67cd777049ef7a95ebaca2ece370b4ed7712c3fa404d69a88cb741b" dependencies = [ "atomic", "bytes 1.0.1", "futures 0.3.13", "lazy_static", - "libp2p-core 0.28.1", - "libp2p-deflate 0.28.0", - "libp2p-dns 0.28.0", - "libp2p-floodsub 0.28.0", - "libp2p-gossipsub 0.29.0", - "libp2p-identify 0.28.0", - "libp2p-kad 0.29.0", - "libp2p-mdns 0.29.0", - "libp2p-mplex 0.28.0", - "libp2p-noise 0.30.0", - "libp2p-ping 0.28.0", - "libp2p-plaintext 0.28.0", + "libp2p-core", + "libp2p-deflate", + "libp2p-dns", + "libp2p-floodsub", + "libp2p-gossipsub", + "libp2p-identify", + "libp2p-kad", + "libp2p-mdns", + "libp2p-mplex", + "libp2p-noise", + "libp2p-ping", + "libp2p-plaintext", "libp2p-pnet", "libp2p-relay", - "libp2p-request-response 0.10.0", - "libp2p-swarm 0.28.0", + "libp2p-request-response", + "libp2p-swarm", "libp2p-swarm-derive", - "libp2p-tcp 0.28.0", - "libp2p-uds 0.28.0", - "libp2p-wasm-ext 0.28.0", - "libp2p-websocket 0.29.0", - "libp2p-yamux 0.31.0", + "libp2p-tcp", + "libp2p-uds", + "libp2p-wasm-ext", + "libp2p-websocket", + "libp2p-yamux", "parity-multiaddr", "parking_lot 0.11.1", "pin-project 1.0.5", @@ -3224,43 +3174,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" -dependencies = [ - "asn1_der", - "bs58", - "ed25519-dalek", - "either", - "fnv", - "futures 0.3.13", - "futures-timer 3.0.2", - "lazy_static", - "libsecp256k1", - "log", - "multihash", - "multistream-select", - "parity-multiaddr", - "parking_lot 0.11.1", - "pin-project 1.0.5", - "prost", - "prost-build", - "rand 0.7.3", - "ring", - "rw-stream-sink", - "sha2 0.9.3", - "smallvec 1.6.1", - "thiserror", - "unsigned-varint 0.7.0", - "void", - "zeroize", -] - -[[package]] -name = "libp2p-core" -version = "0.28.1" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e1797734bbd4c453664fefb029628f77c356ffc5bce98f06b18a7db3ebb0f7" +checksum = "71dd51b562e14846e65bad00e5808d0644376e6588668c490d3c48e1dfeb4a9a" dependencies = [ "asn1_der", "bs58", @@ -3290,17 +3206,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "libp2p-deflate" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d42eed63305f0420736fa487f9acef720c4528bd7852a6a760f5ccde4813345" -dependencies = [ - "flate2", - "futures 0.3.13", - "libp2p-core 0.27.1", -] - [[package]] name = "libp2p-deflate" version = "0.28.0" @@ -3309,29 +3214,18 @@ checksum = "a2181a641cd15f9b6ba71b1335800f309012a0a97a29ffaabbbf40e9d3d58f08" dependencies = [ "flate2", "futures 0.3.13", - "libp2p-core 0.28.1", -] - -[[package]] -name = "libp2p-dns" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5153b6db68fd4baa3b304e377db744dd8fea8ff4e4504509ee636abcde88d3e3" -dependencies = [ - "futures 0.3.13", - "libp2p-core 0.27.1", - "log", + "libp2p-core", ] [[package]] name = "libp2p-dns" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9712eb3e9f7dcc77cc5ca7d943b6a85ce4b1faaf91a67e003442412a26d6d6f8" +checksum = 
"62e63dab8b5ff35e0c101a3e51e843ba782c07bbb1682f5fd827622e0d02b98b" dependencies = [ "async-std-resolver", "futures 0.3.13", - "libp2p-core 0.28.1", + "libp2p-core", "log", "smallvec 1.6.1", "trust-dns-resolver", @@ -3339,33 +3233,15 @@ dependencies = [ [[package]] name = "libp2p-floodsub" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c63dfa06581b24b1d12bf9815b43689a784424be217d6545c800c7c75a207f" -dependencies = [ - "cuckoofilter", - "fnv", - "futures 0.3.13", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "smallvec 1.6.1", -] - -[[package]] -name = "libp2p-floodsub" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897645f99e9b396df256a6aa8ba8c4bc019ac6b7c62556f624b5feea9acc82bb" +checksum = "48a9b570f6766301d9c4aa00fce3554cad1598e2f466debbc4dde909028417cf" dependencies = [ "cuckoofilter", "fnv", "futures 0.3.13", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", + "libp2p-core", + "libp2p-swarm", "log", "prost", "prost-build", @@ -3375,35 +3251,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" -dependencies = [ - "asynchronous-codec 0.6.0", - "base64 0.13.0", - "byteorder", - "bytes 1.0.1", - "fnv", - "futures 0.3.13", - "hex_fmt", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "regex", - "sha2 0.9.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-gossipsub" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "794b0c85f5df1acbc1fc38414d37272594811193b6325c76d3931c3e3f5df8c0" +checksum = 
"73cb9a89a301afde1e588c73f7e9131e12a5388725f290a9047b878862db1b53" dependencies = [ "asynchronous-codec 0.6.0", "base64 0.13.0", @@ -3412,8 +3262,8 @@ dependencies = [ "fnv", "futures 0.3.13", "hex_fmt", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", + "libp2p-core", + "libp2p-swarm", "log", "prost", "prost-build", @@ -3427,67 +3277,25 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b40fb36a059b7a8cce1514bd8b546fa612e006c9937caa7f5950cb20021fe91e" -dependencies = [ - "futures 0.3.13", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", - "log", - "prost", - "prost-build", - "smallvec 1.6.1", - "wasm-timer", -] - -[[package]] -name = "libp2p-identify" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f88ebc841d744979176ab4b8b294a3e655a7ba4ef26a905d073a52b49ed4dff5" -dependencies = [ - "futures 0.3.13", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", - "log", - "prost", - "prost-build", - "smallvec 1.6.1", - "wasm-timer", -] - -[[package]] -name = "libp2p-kad" -version = "0.28.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3da6c9acbcc05f93235d201d7d45ef4e8b88a45d8836f98becd8b4d443f066" +checksum = "5f668f00efd9883e8b7bcc582eaf0164615792608f886f6577da18bcbeea0a46" dependencies = [ - "arrayvec 0.5.2", - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "either", - "fnv", "futures 0.3.13", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", + "libp2p-core", + "libp2p-swarm", "log", "prost", "prost-build", - "rand 0.7.3", - "sha2 0.9.3", "smallvec 1.6.1", - "uint", - "unsigned-varint 0.7.0", - "void", "wasm-timer", ] [[package]] name = "libp2p-kad" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb5b90b6bda749023a85f60b49ea74b387c25f17d8df541ae72a3c75dd52e63" +checksum = 
"b07312ebe5ee4fd2404447a0609814574df55c65d4e20838b957bbd34907d820" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec 0.6.0", @@ -3495,8 +3303,8 @@ dependencies = [ "either", "fnv", "futures 0.3.13", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", + "libp2p-core", + "libp2p-swarm", "log", "prost", "prost-build", @@ -3511,39 +3319,18 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e9e6374814d1b118d97ccabdfc975c8910bd16dc38a8bc058eeb08bf2080fe1" -dependencies = [ - "async-io", - "data-encoding", - "dns-parser", - "futures 0.3.13", - "if-watch 0.1.8", - "lazy_static", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", - "log", - "rand 0.7.3", - "smallvec 1.6.1", - "socket2 0.3.19", - "void", -] - -[[package]] -name = "libp2p-mdns" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be28ca13bb648d249a9baebd750ebc64ce7040ddd5f0ce1035ff1f4549fb596d" +checksum = "c221897b3fd7f215de7ecfec215c5eba598e5b61c605b5f8b56fe8a4fb507724" dependencies = [ "async-io", "data-encoding", "dns-parser", "futures 0.3.13", - "if-watch 0.2.0", + "if-watch", "lazy_static", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", + "libp2p-core", + "libp2p-swarm", "log", "rand 0.8.3", "smallvec 1.6.1", @@ -3551,24 +3338,6 @@ dependencies = [ "void", ] -[[package]] -name = "libp2p-mplex" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core 0.27.1", - "log", - "nohash-hasher", - "parking_lot 0.11.1", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", -] - [[package]] name = "libp2p-mplex" version = "0.28.0" @@ -3578,7 +3347,7 @@ dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures 0.3.13", - 
"libp2p-core 0.28.1", + "libp2p-core", "log", "nohash-hasher", "parking_lot 0.11.1", @@ -3587,28 +3356,6 @@ dependencies = [ "unsigned-varint 0.7.0", ] -[[package]] -name = "libp2p-noise" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aca322b52a0c5136142a7c3971446fb1e9964923a526c9cc6ef3b7c94e57778" -dependencies = [ - "bytes 1.0.1", - "curve25519-dalek 3.0.2", - "futures 0.3.13", - "lazy_static", - "libp2p-core 0.27.1", - "log", - "prost", - "prost-build", - "rand 0.7.3", - "sha2 0.9.3", - "snow", - "static_assertions", - "x25519-dalek", - "zeroize", -] - [[package]] name = "libp2p-noise" version = "0.30.0" @@ -3619,7 +3366,7 @@ dependencies = [ "curve25519-dalek 3.0.2", "futures 0.3.13", "lazy_static", - "libp2p-core 0.28.1", + "libp2p-core", "log", "prost", "prost-build", @@ -3633,51 +3380,19 @@ dependencies = [ [[package]] name = "libp2p-ping" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3813276d0708c8db0f500d8beda1bda9ad955723b9cb272c41f4727256f73c" -dependencies = [ - "futures 0.3.13", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", - "log", - "rand 0.7.3", - "void", - "wasm-timer", -] - -[[package]] -name = "libp2p-ping" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dea10fc5209260915ea65b78f612d7ff78a29ab288e7aa3250796866af861c45" +checksum = "bf4bfaffac63bf3c7ec11ed9d8879d455966ddea7e78ee14737f0b6dce0d1cd1" dependencies = [ "futures 0.3.13", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", + "libp2p-core", + "libp2p-swarm", "log", "rand 0.7.3", "void", "wasm-timer", ] -[[package]] -name = "libp2p-plaintext" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d58defcadb646ae4b033e130b48d87410bf76394dc3335496cae99dac803e61" -dependencies = [ - "asynchronous-codec 0.6.0", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core 0.27.1", - 
"log", - "prost", - "prost-build", - "unsigned-varint 0.7.0", - "void", -] - [[package]] name = "libp2p-plaintext" version = "0.28.0" @@ -3687,7 +3402,7 @@ dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures 0.3.13", - "libp2p-core 0.28.1", + "libp2p-core", "log", "prost", "prost-build", @@ -3711,16 +3426,16 @@ dependencies = [ [[package]] name = "libp2p-relay" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ff268be6a9d6f3c6cca3b81bbab597b15217f9ad8787c6c40fc548c1af7cd24" +checksum = "0b8786aca3f18671d8776289706a5521f6c9124a820f69e358de214b9939440d" dependencies = [ "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures 0.3.13", "futures-timer 3.0.2", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", + "libp2p-core", + "libp2p-swarm", "log", "pin-project 1.0.5", "prost", @@ -3733,70 +3448,34 @@ dependencies = [ ] [[package]] -name = "libp2p-request-response" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10e5552827c33d8326502682da73a0ba4bfa40c1b55b216af3c303f32169dd89" -dependencies = [ - "async-trait", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core 0.27.1", - "libp2p-swarm 0.27.2", - "log", - "lru", - "minicbor 0.7.2", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-request-response" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "725367dd2318c54c5ab1a6418592e5b01c63b0dedfbbfb8389220b2bcf691899" -dependencies = [ - "async-trait", - "bytes 1.0.1", - "futures 0.3.13", - "libp2p-core 0.28.1", - "libp2p-swarm 0.28.0", - "log", - "lru", - "minicbor 0.8.0", - "rand 0.7.3", - "smallvec 1.6.1", - "unsigned-varint 0.7.0", - "wasm-timer", -] - -[[package]] -name = "libp2p-swarm" -version = "0.27.2" +name = "libp2p-request-response" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" +checksum = "1cdbe172f08e6d0f95fa8634e273d4c4268c4063de2e33e7435194b0130c62e3" dependencies = [ - "either", + "async-trait", + "bytes 1.0.1", "futures 0.3.13", - "libp2p-core 0.27.1", + "libp2p-core", + "libp2p-swarm", "log", + "lru", + "minicbor", "rand 0.7.3", "smallvec 1.6.1", - "void", + "unsigned-varint 0.7.0", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75c26980cadd7c25d89071cb23e1f7f5df4863128cc91d83c6ddc72338cecafa" +checksum = "1e04d8e1eef675029ec728ba14e8d0da7975d84b6679b699b4ae91a1de9c3a92" dependencies = [ "either", "futures 0.3.13", - "libp2p-core 0.28.1", + "libp2p-core", "log", "rand 0.7.3", "smallvec 1.6.1", @@ -3806,31 +3485,14 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c564ebaa36a64839f51eaddb0243aaaa29ce64affb56129193cc3248b72af273" +checksum = "365b0a699fea5168676840567582a012ea297b1ca02eee467e58301b9c9c5eed" dependencies = [ "quote", "syn", ] -[[package]] -name = "libp2p-tcp" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" -dependencies = [ - "async-io", - "futures 0.3.13", - "futures-timer 3.0.2", - "if-watch 0.1.8", - "ipnet", - "libc", - "libp2p-core 0.27.1", - "log", - "socket2 0.3.19", -] - [[package]] name = "libp2p-tcp" version = "0.28.0" @@ -3840,26 +3502,14 @@ dependencies = [ "async-io", "futures 0.3.13", "futures-timer 3.0.2", - "if-watch 0.2.0", + "if-watch", "ipnet", "libc", - "libp2p-core 0.28.1", + "libp2p-core", "log", "socket2 0.4.0", ] -[[package]] -name = "libp2p-uds" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"80ac51ce419f60be966e02103c17f67ff5dc4422ba83ba54d251d6c62a4ed487" -dependencies = [ - "async-std", - "futures 0.3.13", - "libp2p-core 0.27.1", - "log", -] - [[package]] name = "libp2p-uds" version = "0.28.0" @@ -3868,56 +3518,24 @@ checksum = "ffd6564bb3b7ff203661ccbb69003c2b551e34cef974f2d6c6a28306a12170b5" dependencies = [ "async-std", "futures 0.3.13", - "libp2p-core 0.28.1", + "libp2p-core", "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6149c46cb76935c80bc8be6ec6e3ebd5f5e1679765a255fb34331d54610f15dd" -dependencies = [ - "futures 0.3.13", - "js-sys", - "libp2p-core 0.27.1", - "parity-send-wrapper", - "wasm-bindgen", - "wasm-bindgen-futures", -] - -[[package]] -name = "libp2p-wasm-ext" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6df65fc13f6188edf7e6927b086330448b3ca27af86b49748c6d299d7c8d9040" +checksum = "cef45d61e43c313531b5e903e4e8415212ff6338e0c54c47da5b9b412b5760de" dependencies = [ "futures 0.3.13", "js-sys", - "libp2p-core 0.28.1", + "libp2p-core", "parity-send-wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] -[[package]] -name = "libp2p-websocket" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3b1c6a3431045da8b925ed83384e4c5163e14b990572307fca9c507435d4d22" -dependencies = [ - "either", - "futures 0.3.13", - "futures-rustls", - "libp2p-core 0.27.1", - "log", - "quicksink", - "rw-stream-sink", - "soketto", - "url 2.2.1", - "webpki-roots", -] - [[package]] name = "libp2p-websocket" version = "0.29.0" @@ -3927,7 +3545,7 @@ dependencies = [ "either", "futures 0.3.13", "futures-rustls", - "libp2p-core 0.28.1", + "libp2p-core", "log", "quicksink", "rw-stream-sink", @@ -3938,25 +3556,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.30.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" -dependencies = [ - "futures 0.3.13", - "libp2p-core 0.27.1", - "parking_lot 0.11.1", - "thiserror", - "yamux", -] - -[[package]] -name = "libp2p-yamux" -version = "0.31.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d6144cc94143fb0a8dd1e7c2fbcc32a2808168bcd1d69920635424d5993b7b" +checksum = "f35da42cfc6d5cb0dcf3ad6881bc68d146cdf38f98655e09e33fbba4d13eabc4" dependencies = [ "futures 0.3.13", - "libp2p-core 0.28.1", + "libp2p-core", "parking_lot 0.11.1", "thiserror", "yamux", @@ -3964,9 +3569,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "6.11.4" +version = "6.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb5b56f651c204634b936be2f92dbb42c36867e00ff7fe2405591f3b9fa66f09" +checksum = "5da125e1c0f22c7cae785982115523a0738728498547f415c9054cb17c7e89f9" dependencies = [ "bindgen", "cc", @@ -4213,15 +3818,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "minicbor" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c2b2c73f9640fccab53947e2b3474d5071fcbc8f82cac51ddf6c8041a30a9ea" -dependencies = [ - "minicbor-derive", -] - [[package]] name = "minicbor" version = "0.8.0" @@ -4501,7 +4097,7 @@ dependencies = [ "futures 0.3.13", "futures-timer 3.0.2", "jsonrpc-core", - "libp2p 0.36.0", + "libp2p", "node-cli", "sc-rpc-api", "serde", @@ -4522,7 +4118,7 @@ dependencies = [ "frame-system", "futures 0.3.13", "hex-literal", - "libp2p-wasm-ext 0.28.0", + "libp2p-wasm-ext", "log", "nix", "node-executor", @@ -4539,7 +4135,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "platforms", "rand 0.7.3", @@ -4599,6 +4195,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "futures 0.3.13", "node-primitives", 
"node-runtime", "node-testing", @@ -4611,7 +4208,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-executor", "sp-application-crypto", "sp-consensus-babe", @@ -4633,7 +4230,7 @@ version = "0.8.0" dependencies = [ "derive_more", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-cli", "sc-client-api", "sc-service", @@ -4648,8 +4245,8 @@ name = "node-primitives" version = "2.0.0" dependencies = [ "frame-system", - "parity-scale-codec 2.0.1", - "pretty_assertions", + "parity-scale-codec", + "pretty_assertions 0.6.1", "sp-application-crypto", "sp-core", "sp-runtime", @@ -4662,7 +4259,6 @@ version = "2.0.0" dependencies = [ "jsonrpc-core", "node-primitives", - "node-runtime", "pallet-contracts-rpc", "pallet-transaction-payment-rpc", "sc-chain-spec", @@ -4756,8 +4352,7 @@ dependencies = [ "pallet-treasury", "pallet-utility", "pallet-vesting", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-api", "sp-authority-discovery", "sp-block-builder", @@ -4801,7 +4396,7 @@ dependencies = [ "pallet-timestamp", "pallet-transaction-payment", "pallet-treasury", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-block-builder", "sc-cli", "sc-client-api", @@ -4909,20 +4504,14 @@ dependencies = [ [[package]] name = "object" -version = "0.22.0" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d3b63360ec3cb337817c2dbd47ab4a0f170d285d8e5a2064600f3def1402397" +checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" dependencies = [ "crc32fast", "indexmap", ] -[[package]] -name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - [[package]] name = "once_cell" version = "1.7.2" @@ -4982,8 +4571,7 @@ dependencies = [ "frame-support", "frame-system", 
"pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -4997,8 +4585,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5014,9 +4601,8 @@ dependencies = [ "lazy_static", "pallet-session", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", - "serde", "sp-application-crypto", "sp-consensus-aura", "sp-core", @@ -5032,8 +4618,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-application-crypto", "sp-authority-discovery", "sp-core", @@ -5050,7 +4635,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-authorship", "sp-core", @@ -5076,8 +4661,7 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-application-crypto", "sp-consensus-babe", "sp-consensus-vrf", @@ -5098,8 +4682,7 @@ dependencies = [ "frame-system", "log", "pallet-transaction-payment", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5115,8 +4698,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-treasury", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5126,11 +4708,11 @@ dependencies = [ [[package]] name = "pallet-cere-ddc" -version = "5.0.0" +version = "5.1.0" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5147,8 +4729,7 @@ dependencies = [ "frame-system", "lite-json", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", 
"sp-keystore", @@ -5166,8 +4747,7 @@ dependencies = [ "hex-literal", "log", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5189,20 +4769,20 @@ dependencies = [ "pallet-contracts-proc-macro", "pallet-randomness-collective-flip", "pallet-timestamp", - "parity-scale-codec 2.0.1", - "parity-wasm 0.41.0", + "parity-scale-codec", + "parity-wasm 0.42.2", "paste 1.0.4", - "pretty_assertions", - "pwasm-utils 0.16.0", - "rand 0.7.3", - "rand_pcg", + "pretty_assertions 0.7.2", + "pwasm-utils 0.17.0", + "rand 0.8.3", + "rand_pcg 0.3.0", "serde", "sp-core", "sp-io", "sp-runtime", "sp-sandbox", "sp-std", - "wasmi-validation", + "wasmi-validation 0.4.0", "wat", ] @@ -5211,7 +4791,9 @@ name = "pallet-contracts-primitives" version = "3.0.0" dependencies = [ "bitflags", - "parity-scale-codec 2.0.1", + "parity-scale-codec", + "serde", + "sp-core", "sp-runtime", "sp-std", ] @@ -5234,7 +4816,7 @@ dependencies = [ "jsonrpc-derive", "pallet-contracts-primitives", "pallet-contracts-rpc-runtime-api", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "serde_json 1.0.64", "sp-api", @@ -5249,7 +4831,7 @@ name = "pallet-contracts-rpc-runtime-api" version = "3.0.0" dependencies = [ "pallet-contracts-primitives", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-runtime", "sp-std", @@ -5269,8 +4851,8 @@ dependencies = [ "pallet-contracts", "pallet-randomness-collective-flip", "pallet-timestamp", - "parity-scale-codec 2.0.1", - "pretty_assertions", + "parity-scale-codec", + "pretty_assertions 0.6.1", "serde", "serde_json 1.0.44", "sp-core", @@ -5290,7 +4872,7 @@ dependencies = [ "hex-literal", "pallet-balances", "pallet-scheduler", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5311,11 +4893,10 @@ dependencies = [ "hex-literal", "log", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "paste 1.0.4", "rand 
0.7.3", - "serde", "sp-arithmetic", "sp-core", "sp-io", @@ -5335,8 +4916,7 @@ dependencies = [ "frame-system", "hex-literal", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5345,7 +4925,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "3.0.0" +version = "4.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5353,8 +4933,7 @@ dependencies = [ "hex-literal", "log", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-npos-elections", @@ -5373,7 +4952,7 @@ dependencies = [ "pallet-balances", "pallet-chainbridge", "pallet-erc721", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-arithmetic", "sp-core", @@ -5391,7 +4970,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-chainbridge", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5401,14 +4980,14 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5421,7 +5000,7 @@ version = "2.0.1" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5438,8 +5017,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-arithmetic", "sp-core", "sp-io", @@ -5464,8 +5042,7 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-application-crypto", "sp-core", "sp-finality-grandpa", @@ -5486,8 +5063,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - 
"parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5504,8 +5080,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-application-crypto", "sp-core", "sp-io", @@ -5522,8 +5097,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-keyring", @@ -5540,7 +5114,7 @@ dependencies = [ "frame-support-test", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5552,10 +5126,11 @@ dependencies = [ name = "pallet-membership" version = "3.0.0" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 2.0.1", - "serde", + "log", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5573,8 +5148,7 @@ dependencies = [ "frame-system", "hex-literal", "pallet-mmr-primitives", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5589,7 +5163,7 @@ dependencies = [ "frame-system", "hex-literal", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-core", @@ -5605,7 +5179,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-mmr-primitives", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "serde_json 1.0.64", "sp-api", @@ -5623,8 +5197,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5638,8 +5211,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5653,8 +5225,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec 2.0.1", - "serde", + 
"parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5669,7 +5240,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5695,7 +5266,7 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5713,8 +5284,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-utility", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5727,7 +5297,7 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "safe-mix", "serde", "sp-core", @@ -5744,8 +5314,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5760,8 +5329,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5776,8 +5344,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5793,8 +5360,7 @@ dependencies = [ "impl-trait-for-tuples", "lazy_static", "pallet-timestamp", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-application-crypto", "sp-core", "sp-io", @@ -5818,7 +5384,7 @@ dependencies = [ "pallet-staking", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand 0.7.3", "serde", "sp-core", @@ -5836,9 +5402,8 @@ dependencies = [ "frame-support-test", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand_chacha 0.2.2", - "serde", "sp-core", "sp-io", "sp-runtime", @@ -5860,7 +5425,7 @@ dependencies = [ 
"pallet-session", "pallet-staking-reward-curve", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "paste 1.0.4", "rand_chacha 0.2.2", @@ -5902,8 +5467,7 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -5919,8 +5483,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-inherents", "sp-io", @@ -5938,7 +5501,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-treasury", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -5954,7 +5517,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "serde_json 1.0.64", "smallvec 1.6.1", @@ -5973,7 +5536,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-blockchain", "sp-core", @@ -5986,7 +5549,7 @@ name = "pallet-transaction-payment-rpc-runtime-api" version = "3.0.0" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-runtime", ] @@ -6000,7 +5563,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -6017,8 +5580,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -6035,8 +5597,7 @@ dependencies = [ "frame-system", "hex-literal", "pallet-balances", - "parity-scale-codec 2.0.1", - "serde", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -6081,34 +5642,22 @@ dependencies = [ [[package]] name = 
"parity-scale-codec" -version = "1.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4b26b16c7687c3075982af47719e481815df30bc544f7a6690763a25ca16e9d" -dependencies = [ - "arrayvec 0.5.2", - "bitvec 0.17.4", - "byte-slice-cast 0.3.5", - "serde", -] - -[[package]] -name = "parity-scale-codec" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd3dab59b5cf4bc81069ade0fc470341a1ef3ad5fa73e5a8943bed2ec12b2e8" +checksum = "731f4d179ed52b1c7eeb29baf29c604ea9301b889b23ce93660220a5465d5c6f" dependencies = [ - "arrayvec 0.5.2", - "bitvec 0.20.2", - "byte-slice-cast 1.0.0", + "arrayvec 0.7.0", + "bitvec", + "byte-slice-cast", "parity-scale-codec-derive", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa04976a81fde04924b40cc4036c4d12841e8bb04325a5cf2ada75731a150a7d" +checksum = "f44c5f94427bd0b5076e8f7e15ca3f60a4d8ac0077e4793884e6fdfd8915344e" dependencies = [ "proc-macro-crate 0.1.5", "proc-macro2", @@ -6183,6 +5732,12 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" +[[package]] +name = "parity-wasm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" + [[package]] name = "parity-ws" version = "0.10.0" @@ -6579,6 +6134,18 @@ dependencies = [ "output_vt100", ] +[[package]] +name = "pretty_assertions" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +dependencies = [ + "ansi_term 0.12.1", + "ctor", + "diff", + "output_vt100", +] + [[package]] name = "primitive-types" version = "0.9.0" @@ -6648,9 +6215,9 @@ checksum 
= "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] name = "proc-macro2" -version = "1.0.24" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0704ee1a7e00d7bb417d0770ea303c1bccbabf0ef1667dae92b5967f5f8a71" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ "unicode-xid", ] @@ -6694,7 +6261,7 @@ dependencies = [ "prost", "prost-types", "tempfile", - "which 4.0.2", + "which", ] [[package]] @@ -6742,13 +6309,13 @@ dependencies = [ [[package]] name = "pwasm-utils" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c8ac87af529432d3a4f0e2b3bbf08af49f28f09cc73ed7e551161bdaef5f78d" +checksum = "51992bc74c0f34f759ff97fb303602e60343afc83693769c91aa17724442809e" dependencies = [ "byteorder", "log", - "parity-wasm 0.41.0", + "parity-wasm 0.42.2", ] [[package]] @@ -6794,12 +6361,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - [[package]] name = "radium" version = "0.6.2" @@ -6840,7 +6401,7 @@ dependencies = [ "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc 0.2.0", - "rand_pcg", + "rand_pcg 0.2.1", ] [[package]] @@ -6945,14 +6506,12 @@ dependencies = [ ] [[package]] -name = "raw-cpuid" -version = "8.1.2" +name = "rand_pcg" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fdf7d9dbd43f3d81d94a49c1c3df73cc2b3827995147e6cf7f89d4ec5483e73" +checksum = "7de198537002b913568a3847e53535ace266f93526caf5c360ec41d72c5787f0" dependencies = [ - "bitflags", - "cc", - "rustc_version", + "rand_core 0.6.2", ] [[package]] @@ -7059,6 +6618,7 @@ checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ "log", "rustc-hash", + "serde", 
"smallvec 1.6.1", ] @@ -7110,9 +6670,8 @@ dependencies = [ "hex-literal", "jsonrpsee-http-client", "jsonrpsee-proc-macros", - "jsonrpsee-types", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-io", "sp-runtime", @@ -7262,6 +6821,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb5d2a036dc6d2d8fd16fde3498b04306e29bd193bf306a57427019b823d5acd" +[[package]] +name = "ruzstd" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d425143485a37727c7a46e689bbe3b883a00f42b4a52c4ac0f44855c1009b00" +dependencies = [ + "byteorder", + "twox-hash", +] + [[package]] name = "rw-stream-sink" version = "0.2.1" @@ -7315,9 +6884,10 @@ dependencies = [ "either", "futures 0.3.13", "futures-timer 3.0.2", - "libp2p 0.36.0", + "ip_network", + "libp2p", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "prost", "prost-build", "quickcheck", @@ -7344,7 +6914,7 @@ dependencies = [ "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -7366,7 +6936,7 @@ dependencies = [ name = "sc-block-builder" version = "0.9.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sp-api", "sp-block-builder", @@ -7384,7 +6954,7 @@ name = "sc-chain-spec" version = "3.0.0" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-chain-spec-derive", "sc-consensus-babe", "sc-consensus-epochs", @@ -7417,10 +6987,10 @@ dependencies = [ "fdlimit", "futures 0.3.13", "hex", - "libp2p 0.36.0", + "libp2p", "log", "names", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand 0.7.3", "regex", "rpassword", @@ -7459,7 +7029,7 @@ dependencies = [ "kvdb-memorydb", "lazy_static", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-executor", "sp-api", @@ -7496,7 
+7066,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "quickcheck", @@ -7522,6 +7092,7 @@ dependencies = [ name = "sc-consensus" version = "0.9.0" dependencies = [ + "parking_lot 0.11.1", "sc-client-api", "sp-blockchain", "sp-consensus", @@ -7532,12 +7103,13 @@ dependencies = [ name = "sc-consensus-aura" version = "0.9.0" dependencies = [ + "async-trait", "derive_more", "futures 0.3.13", "futures-timer 3.0.2", "getrandom 0.2.2", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -7573,6 +7145,7 @@ dependencies = [ name = "sc-consensus-babe" version = "0.9.0" dependencies = [ + "async-trait", "derive_more", "fork-tree", "futures 0.3.13", @@ -7582,7 +7155,7 @@ dependencies = [ "num-bigint", "num-rational", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "pdqselect", "rand 0.7.3", @@ -7658,9 +7231,9 @@ name = "sc-consensus-epochs" version = "0.9.0" dependencies = [ "fork-tree", - "parity-scale-codec 2.0.1", - "parking_lot 0.11.1", + "parity-scale-codec", "sc-client-api", + "sc-consensus", "sp-blockchain", "sp-runtime", ] @@ -7670,13 +7243,14 @@ name = "sc-consensus-manual-seal" version = "0.9.0" dependencies = [ "assert_matches", + "async-trait", "derive_more", "futures 0.3.13", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-basic-authorship", "sc-client-api", @@ -7707,11 +7281,12 @@ dependencies = [ name = "sc-consensus-pow" version = "0.9.0" dependencies = [ + "async-trait", "derive_more", "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-client-api", "sp-api", @@ -7730,11 +7305,11 @@ dependencies = [ name = "sc-consensus-slots" version = "0.9.0" dependencies = [ + 
"async-trait", "futures 0.3.13", "futures-timer 3.0.2", "log", - "parity-scale-codec 2.0.1", - "parking_lot 0.11.1", + "parity-scale-codec", "sc-client-api", "sc-telemetry", "sp-api", @@ -7776,7 +7351,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-wasm 0.41.0", "parking_lot 0.11.1", "paste 1.0.4", @@ -7789,6 +7364,7 @@ dependencies = [ "sp-core", "sp-externalities", "sp-io", + "sp-maybe-compressed-blob", "sp-panic-handler", "sp-runtime", "sp-runtime-interface", @@ -7812,8 +7388,9 @@ name = "sc-executor-common" version = "0.9.0" dependencies = [ "derive_more", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-wasm 0.41.0", + "pwasm-utils 0.14.0", "sp-allocator", "sp-core", "sp-serializer", @@ -7827,7 +7404,7 @@ name = "sc-executor-wasmi" version = "0.9.0" dependencies = [ "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-executor-common", "sp-allocator", "sp-core", @@ -7842,7 +7419,7 @@ version = "0.9.0" dependencies = [ "assert_matches", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-wasm 0.41.0", "pwasm-utils 0.14.0", "sc-executor-common", @@ -7859,6 +7436,7 @@ name = "sc-finality-grandpa" version = "0.9.0" dependencies = [ "assert_matches", + "async-trait", "derive_more", "dyn-clone", "finality-grandpa", @@ -7867,7 +7445,7 @@ dependencies = [ "futures-timer 3.0.2", "linked-hash-map", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.5", "rand 0.7.3", @@ -7914,7 +7492,7 @@ dependencies = [ "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-block-builder", "sc-client-api", "sc-finality-grandpa", @@ -7940,7 +7518,7 @@ dependencies = [ "futures 0.3.13", "log", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "prost", "rand 0.8.3", @@ -8000,7 +7578,7 @@ version = "3.0.0" dependencies = [ "hash-db", 
"lazy_static", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-client-api", "sc-executor", @@ -8033,13 +7611,13 @@ dependencies = [ "futures-timer 3.0.2", "hex", "ip_network", - "libp2p 0.36.0", + "libp2p", "linked-hash-map", "linked_hash_set", "log", "lru", "nohash-hasher", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.5", "prost", @@ -8079,7 +7657,7 @@ dependencies = [ "async-std", "futures 0.3.13", "futures-timer 3.0.2", - "libp2p 0.36.0", + "libp2p", "log", "lru", "quickcheck", @@ -8097,9 +7675,10 @@ name = "sc-network-test" version = "0.8.0" dependencies = [ "async-std", + "async-trait", "futures 0.3.13", "futures-timer 3.0.2", - "libp2p 0.36.0", + "libp2p", "log", "parking_lot 0.11.1", "rand 0.7.3", @@ -8133,7 +7712,7 @@ dependencies = [ "lazy_static", "log", "num_cpus", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "rand 0.7.3", "sc-block-builder", @@ -8160,7 +7739,7 @@ name = "sc-peerset" version = "3.0.0" dependencies = [ "futures 0.3.13", - "libp2p 0.36.0", + "libp2p", "log", "rand 0.7.3", "serde_json 1.0.64", @@ -8188,7 +7767,7 @@ dependencies = [ "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-block-builder", "sc-cli", @@ -8229,7 +7808,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "serde", "serde_json 1.0.64", @@ -8277,6 +7856,7 @@ name = "sc-service" version = "0.9.0" dependencies = [ "async-std", + "async-trait", "directories", "exit-future", "futures 0.1.31", @@ -8287,7 +7867,7 @@ dependencies = [ "jsonrpc-pubsub", "lazy_static", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "pin-project 1.0.5", @@ -8350,7 +7930,7 @@ dependencies = [ "futures 0.3.13", "hex-literal", "log", - "parity-scale-codec 2.0.1", + 
"parity-scale-codec", "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", @@ -8382,7 +7962,7 @@ name = "sc-state-db" version = "0.9.0" dependencies = [ "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.11.1", @@ -8416,7 +7996,7 @@ version = "3.0.0" dependencies = [ "chrono", "futures 0.3.13", - "libp2p 0.36.0", + "libp2p", "log", "parking_lot 0.11.1", "pin-project 1.0.5", @@ -8475,7 +8055,7 @@ dependencies = [ "futures 0.3.13", "linked-hash-map", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "retain_mut", @@ -8500,7 +8080,7 @@ dependencies = [ "hex", "intervalier", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", "sc-block-builder", @@ -8938,7 +8518,7 @@ version = "3.0.0" dependencies = [ "hash-db", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -8966,7 +8546,7 @@ version = "2.0.1" dependencies = [ "criterion", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rustversion", "sc-block-builder", "sp-api", @@ -8985,7 +8565,7 @@ dependencies = [ name = "sp-application-crypto" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-core", "sp-io", @@ -9011,13 +8591,14 @@ dependencies = [ "criterion", "integer-sqrt", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "primitive-types", "rand 0.7.3", "serde", "serde_json 1.0.64", "sp-debug-derive", "sp-std", + "static_assertions", ] [[package]] @@ -9035,7 +8616,7 @@ dependencies = [ name = "sp-authority-discovery" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-application-crypto", "sp-runtime", @@ -9046,7 +8627,7 @@ dependencies = [ name = "sp-authorship" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", 
"sp-inherents", "sp-runtime", "sp-std", @@ -9056,7 +8637,7 @@ dependencies = [ name = "sp-block-builder" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", @@ -9070,7 +8651,7 @@ dependencies = [ "futures 0.3.13", "log", "lru", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sp-api", "sp-consensus", @@ -9092,11 +8673,12 @@ dependencies = [ name = "sp-consensus" version = "0.9.0" dependencies = [ + "async-trait", "futures 0.3.13", "futures-timer 3.0.2", - "libp2p 0.36.0", + "libp2p", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "serde", "sp-api", @@ -9118,7 +8700,7 @@ dependencies = [ name = "sp-consensus-aura" version = "0.9.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-application-crypto", "sp-consensus", @@ -9134,7 +8716,7 @@ name = "sp-consensus-babe" version = "0.9.0" dependencies = [ "merlin", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-application-crypto", @@ -9153,7 +8735,7 @@ dependencies = [ name = "sp-consensus-pow" version = "0.9.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-core", "sp-runtime", @@ -9164,7 +8746,7 @@ dependencies = [ name = "sp-consensus-slots" version = "0.9.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-arithmetic", "sp-runtime", ] @@ -9173,7 +8755,7 @@ dependencies = [ name = "sp-consensus-vrf" version = "0.9.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "schnorrkel", "sp-core", "sp-runtime", @@ -9201,10 +8783,10 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", - "pretty_assertions", + "pretty_assertions 0.6.1", "primitive-types", "rand 0.7.3", "rand_chacha 0.2.2", @@ -9251,7 +8833,7 @@ name = "sp-externalities" version = 
"0.9.0" dependencies = [ "environmental", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-std", "sp-storage", ] @@ -9262,7 +8844,7 @@ version = "3.0.0" dependencies = [ "finality-grandpa", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-application-crypto", @@ -9276,7 +8858,7 @@ dependencies = [ name = "sp-inherents" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sp-core", "sp-std", @@ -9291,7 +8873,7 @@ dependencies = [ "hash-db", "libsecp256k1", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sp-core", "sp-externalities", @@ -9324,7 +8906,7 @@ dependencies = [ "derive_more", "futures 0.3.13", "merlin", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "rand 0.7.3", "rand_chacha 0.2.2", @@ -9334,11 +8916,19 @@ dependencies = [ "sp-externalities", ] +[[package]] +name = "sp-maybe-compressed-blob" +version = "3.0.0" +dependencies = [ + "ruzstd", + "zstd", +] + [[package]] name = "sp-npos-elections" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand 0.7.3", "serde", "sp-arithmetic", @@ -9353,10 +8943,14 @@ dependencies = [ name = "sp-npos-elections-compact" version = "3.0.0" dependencies = [ + "parity-scale-codec", "proc-macro-crate 1.0.0", "proc-macro2", "quote", + "sp-arithmetic", + "sp-npos-elections", "syn", + "trybuild", ] [[package]] @@ -9364,7 +8958,7 @@ name = "sp-npos-elections-fuzzer" version = "2.0.0-alpha.5" dependencies = [ "honggfuzz", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "rand 0.7.3", "sp-arithmetic", "sp-npos-elections", @@ -9407,7 +9001,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "paste 1.0.4", "rand 0.7.3", @@ -9429,7 +9023,7 @@ name = "sp-runtime-interface" version = "3.0.0" dependencies = [ "impl-trait-for-tuples", 
- "parity-scale-codec 2.0.1", + "parity-scale-codec", "primitive-types", "rustversion", "sp-core", @@ -9500,7 +9094,7 @@ name = "sp-sandbox" version = "0.9.0" dependencies = [ "assert_matches", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-io", "sp-std", @@ -9521,7 +9115,7 @@ dependencies = [ name = "sp-session" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-core", "sp-runtime", @@ -9533,7 +9127,7 @@ dependencies = [ name = "sp-staking" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-runtime", "sp-std", ] @@ -9546,9 +9140,9 @@ dependencies = [ "hex-literal", "log", "num-traits", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", - "pretty_assertions", + "pretty_assertions 0.6.1", "rand 0.7.3", "smallvec 1.6.1", "sp-core", @@ -9571,7 +9165,7 @@ name = "sp-storage" version = "3.0.0" dependencies = [ "impl-serde", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "ref-cast", "serde", "sp-debug-derive", @@ -9583,7 +9177,7 @@ name = "sp-tasks" version = "3.0.0" dependencies = [ "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-externalities", "sp-io", @@ -9595,7 +9189,7 @@ dependencies = [ name = "sp-test-primitives" version = "2.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "serde", "sp-application-crypto", @@ -9607,7 +9201,7 @@ dependencies = [ name = "sp-timestamp" version = "3.0.0" dependencies = [ - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-api", "sp-inherents", "sp-runtime", @@ -9620,7 +9214,7 @@ name = "sp-tracing" version = "3.0.0" dependencies = [ "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-std", "tracing", "tracing-core", @@ -9634,7 +9228,7 @@ dependencies = [ "derive_more", "futures 0.3.13", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-api", "sp-blockchain", @@ -9650,7 
+9244,7 @@ dependencies = [ "hash-db", "hex-literal", "memory-db", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-core", "sp-runtime", "sp-std", @@ -9676,7 +9270,7 @@ name = "sp-version" version = "3.0.0" dependencies = [ "impl-serde", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "serde", "sp-runtime", "sp-std", @@ -9687,7 +9281,7 @@ name = "sp-wasm-interface" version = "3.0.0" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sp-std", "wasmi", ] @@ -9822,7 +9416,7 @@ dependencies = [ "getrandom 0.2.2", "js-sys", "kvdb-web", - "libp2p-wasm-ext 0.28.0", + "libp2p-wasm-ext", "log", "rand 0.7.3", "sc-chain-spec", @@ -9846,6 +9440,7 @@ dependencies = [ name = "substrate-frame-cli" version = "3.0.0" dependencies = [ + "frame-support", "frame-system", "sc-cli", "sp-core", @@ -9862,7 +9457,7 @@ dependencies = [ "futures 0.3.13", "jsonrpc-client-transports", "jsonrpc-core", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-rpc-api", "serde", "sp-storage", @@ -9879,7 +9474,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sc-rpc-api", "sc-transaction-pool", @@ -9911,11 +9506,12 @@ dependencies = [ name = "substrate-test-client" version = "2.0.1" dependencies = [ + "async-trait", "futures 0.1.31", "futures 0.3.13", "hash-db", "hex", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-client-api", "sc-client-db", "sc-consensus", @@ -9942,11 +9538,12 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", + "futures 0.3.13", "log", "memory-db", "pallet-babe", "pallet-timestamp", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parity-util-mem", "sc-block-builder", "sc-executor", @@ -9982,7 +9579,7 @@ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ "futures 0.3.13", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "sc-block-builder", 
"sc-client-api", "sc-consensus", @@ -10003,7 +9600,7 @@ version = "2.0.0" dependencies = [ "derive_more", "futures 0.3.13", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "parking_lot 0.11.1", "sc-transaction-graph", "sp-blockchain", @@ -10049,6 +9646,7 @@ dependencies = [ "atty", "build-helper", "cargo_metadata", + "sp-maybe-compressed-blob", "tempfile", "toml", "walkdir", @@ -10069,9 +9667,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.62" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "123a78a3596b24fee53a6464ce52d8ecbf62241e6294c7e7fe12086cd161f512" +checksum = "48fe99c6bd8b1cc636890bcc071842de909d902c81ac7dab53ba33c421ab8ffb" dependencies = [ "proc-macro2", "quote", @@ -10159,21 +9757,14 @@ version = "0.9.0" dependencies = [ "env_logger 0.7.1", "frame-system", - "futures 0.1.31", "futures 0.3.13", "jsonrpc-core", - "libp2p 0.35.1", "log", - "node-cli", - "parity-scale-codec 1.3.7", - "rand 0.7.3", "sc-basic-authorship", "sc-cli", "sc-client-api", - "sc-consensus-babe", "sc-consensus-manual-seal", "sc-executor", - "sc-finality-grandpa", "sc-informant", "sc-network", "sc-rpc", @@ -10185,7 +9776,6 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-consensus", - "sp-consensus-babe", "sp-core", "sp-externalities", "sp-inherents", @@ -10216,7 +9806,6 @@ dependencies = [ "pallet-balances", "pallet-sudo", "pallet-transaction-payment", - "rand 0.8.3", "sc-client-api", "sc-consensus", "sc-consensus-babe", @@ -10738,7 +10327,7 @@ dependencies = [ "hash-db", "keccak-hasher", "memory-db", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "trie-db", "trie-root", "trie-standardmap", @@ -10831,7 +10420,7 @@ version = "0.9.0" dependencies = [ "frame-try-runtime", "log", - "parity-scale-codec 2.0.1", + "parity-scale-codec", "remote-externalities", "sc-cli", "sc-client-api", @@ -11110,9 +10699,9 @@ checksum = 
"1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee1280240b7c461d6a0071313e08f34a60b0365f14260362e5a2b17d1d31aa7" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ "cfg-if 1.0.0", "serde", @@ -11122,9 +10711,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b7d8b6942b8bb3a9b0e73fc79b98095a27de6fa247615e59d096754a3bc2aa8" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", @@ -11149,9 +10738,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ac38da8ef716661f0f36c0d8320b89028efe10c7c0afde65baffb496ce0d3b" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11159,9 +10748,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc053ec74d454df287b9374ee8abb36ffd5acb95ba87da3ba5b7d3fe20eb401e" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -11172,9 +10761,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.71" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6f8ec44822dd71f5f221a5847fb34acd9060535c1211b70a05844c0f6383b1" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "wasm-bindgen-test" @@ -11238,7 +10827,7 @@ 
dependencies = [ "num-rational", "num-traits", "parity-wasm 0.41.0", - "wasmi-validation", + "wasmi-validation 0.3.0", ] [[package]] @@ -11250,17 +10839,26 @@ dependencies = [ "parity-wasm 0.41.0", ] +[[package]] +name = "wasmi-validation" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb8e860796d8be48efef530b60eebf84e74a88bce107374fffb0da97d504b8" +dependencies = [ + "parity-wasm 0.42.2", +] + [[package]] name = "wasmparser" -version = "0.71.0" +version = "0.76.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a30c99437829ede826802bfcf28500cf58df00e66cb9114df98813bc145ff1" +checksum = "755a9a4afe3f6cccbbe6d7e965eef44cf260b001f93e547eba84255c1d0187d8" [[package]] name = "wasmtime" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7426055cb92bd9a1e9469b48154d8d6119cd8c498c8b70284e420342c05dc45d" +checksum = "718cb52a9fdb7ab12471e9b9d051c9adfa6b5c504e0a1fea045e5eabc81eedd9" dependencies = [ "anyhow", "backtrace", @@ -11270,6 +10868,7 @@ dependencies = [ "indexmap", "libc", "log", + "paste 1.0.4", "region", "rustc-demangle", "serde", @@ -11278,6 +10877,7 @@ dependencies = [ "wasmparser", "wasmtime-cache", "wasmtime-environ", + "wasmtime-fiber", "wasmtime-jit", "wasmtime-profiling", "wasmtime-runtime", @@ -11287,9 +10887,9 @@ dependencies = [ [[package]] name = "wasmtime-cache" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c01d9287e36921e46f5887a47007824ae5dbb9b7517a2d565660ab4471478709" +checksum = "1f984df56c4adeba91540f9052db9f7a8b3b00cfaac1a023bee50a972f588b0c" dependencies = [ "anyhow", "base64 0.13.0", @@ -11308,27 +10908,28 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4134ed3a4316cd0de0e546c6004850afe472b0fa3fcdc2f2c15f8d449562d962" +checksum = "2a05abbf94e03c2c8ee02254b1949320c4d45093de5d9d6ed4d9351d536075c9" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", "cranelift-wasm", + "wasmparser", "wasmtime-environ", ] [[package]] name = "wasmtime-debug" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91fa931df6dd8af2b02606307674d3bad23f55473d5f4c809dddf7e4c4dc411" +checksum = "382eecd6281c6c1d1f3c904c3c143e671fc1a9573820cbfa777fba45ce2eda9c" dependencies = [ "anyhow", "gimli", "more-asserts", - "object 0.22.0", + "object", "target-lexicon", "thiserror", "wasmparser", @@ -11337,9 +10938,9 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1098871dc3120aaf8190d79153e470658bb79f63ee9ca31716711e123c28220" +checksum = "81011b2b833663d7e0ce34639459a0e301e000fc7331e0298b3a27c78d0cec60" dependencies = [ "anyhow", "cfg-if 1.0.0", @@ -11355,11 +10956,22 @@ dependencies = [ "wasmparser", ] +[[package]] +name = "wasmtime-fiber" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d92da32e31af2e3d828f485f5f24651ed4d3b7f03a46ea6555eae6940d1402cd" +dependencies = [ + "cc", + "libc", + "winapi 0.3.9", +] + [[package]] name = "wasmtime-jit" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738bfcd1561ede8bb174215776fd7d9a95d5f0a47ca3deabe0282c55f9a89f68" +checksum = "9b5f649623859a12d361fe4cc4793de44f7c3ff34c322c5714289787e89650bb" dependencies = [ "addr2line", "anyhow", @@ -11372,7 +10984,7 @@ dependencies = [ "gimli", "log", "more-asserts", - "object 0.22.0", + "object", "rayon", "region", "serde", @@ -11390,13 +11002,13 @@ dependencies = [ [[package]] name = "wasmtime-obj" -version = "0.22.0" +version = 
"0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e96d77f1801131c5e86d93e42a3cf8a35402107332c202c245c83f34888a906" +checksum = "ef2e99cd9858f57fd062e9351e07881cedfc8597928385e02a48d9333b9e15a1" dependencies = [ "anyhow", "more-asserts", - "object 0.22.0", + "object", "target-lexicon", "wasmtime-debug", "wasmtime-environ", @@ -11404,16 +11016,16 @@ dependencies = [ [[package]] name = "wasmtime-profiling" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60bb672c9d894776d7b9250dd9b4fe890f8760201ee4f53e5f2da772b6c4debb" +checksum = "e46c0a590e49278ba7f79ef217af9db4ecc671b50042c185093e22d73524abb2" dependencies = [ "anyhow", "cfg-if 1.0.0", "gimli", "lazy_static", "libc", - "object 0.22.0", + "object", "scroll", "serde", "target-lexicon", @@ -11423,9 +11035,9 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "0.22.0" +version = "0.24.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a978086740949eeedfefcee667b57a9e98d9a7fc0de382fcfa0da30369e3530d" +checksum = "1438a09185fc7ca067caf1a80d7e5b398eefd4fb7630d94841448ade60feb3d0" dependencies = [ "backtrace", "cc", @@ -11499,15 +11111,6 @@ dependencies = [ "cc", ] -[[package]] -name = "which" -version = "3.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" -dependencies = [ - "libc", -] - [[package]] name = "which" version = "4.0.2" @@ -11605,15 +11208,15 @@ dependencies = [ [[package]] name = "yamux" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" +checksum = "e7d9028f208dd5e63c614be69f115c1b53cacc1111437d4c765185856666c107" dependencies = [ "futures 0.3.13", "log", "nohash-hasher", "parking_lot 0.11.1", - "rand 0.7.3", + "rand 0.8.3", 
"static_assertions", ] @@ -11640,18 +11243,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = "0.6.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "3.0.1+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" dependencies = [ "libc", "zstd-sys", @@ -11659,12 +11262,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.4.20+zstd.1.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" dependencies = [ "cc", - "glob", - "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 4ce9939227a0a..8d24238231fab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,6 @@ [workspace] +resolver = "2" + members = [ "bin/node/bench", "bin/node/browser-testing", @@ -150,6 +152,7 @@ members = [ "primitives/io", "primitives/keyring", "primitives/keystore", + "primitives/maybe-compressed-blob", "primitives/npos-elections", "primitives/npos-elections/compact", "primitives/npos-elections/fuzzer", diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 8469ec62893b5..6524662317148 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -171,6 +171,7 @@ impl core::Benchmark for ConstructionBenchmark { inherent_data_providers.create_inherent_data().expect("Create inherent data failed"), 
Default::default(), std::time::Duration::from_secs(20), + None, ), ).map(|r| r.block).expect("Proposing failed"); diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 292ee2cab6bf7..93bf8f5131e3f 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,11 +8,11 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.36.0", default-features = false } +libp2p = { version = "0.37.1", default-features = false } jsonrpc-core = "15.0.0" serde = "1.0.106" serde_json = "1.0.48" -wasm-bindgen = { version = "=0.2.71", features = ["serde-serialize"] } +wasm-bindgen = { version = "=0.2.73", features = ["serde-serialize"] } wasm-bindgen-futures = "0.4.18" wasm-bindgen-test = "0.3.18" futures = "0.3.9" diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index d3361949b19ce..74c5cdaadf4ec 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -102,7 +102,7 @@ node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.9.0", optional = true, path = "../../../utils/frame/try-runtime/cli" } # WASM-specific dependencies -wasm-bindgen = { version = "0.2.57", optional = true } +wasm-bindgen = { version = "0.2.73", optional = true } wasm-bindgen-futures = { version = "0.4.18", optional = true } browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.9.0"} libp2p-wasm-ext = { version = "0.28", features = ["websocket"], optional = true } @@ -116,7 +116,7 @@ sp-trie = { version = "3.0.0", default-features = false, path = "../../../primit [dev-dependencies] sc-keystore = { version = "3.0.0", path = "../../../client/keystore" } sc-consensus = { version = "0.9.0", path = "../../../client/consensus/common" } -sc-consensus-babe = { version = "0.9.0", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-babe = { 
version = "0.9.0", path = "../../../client/consensus/babe" } sc-consensus-epochs = { version = "0.9.0", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } futures = "0.3.9" diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 7c5277c2cd07a..007d8c0969bb2 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -23,9 +23,9 @@ use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519}; use serde::{Serialize, Deserialize}; use node_runtime::{ AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, CouncilConfig, - DemocracyConfig,GrandpaConfig, ImOnlineConfig, SessionConfig, SessionKeys, StakerStatus, + DemocracyConfig, GrandpaConfig, ImOnlineConfig, SessionConfig, SessionKeys, StakerStatus, StakingConfig, ElectionsConfig, IndicesConfig, SocietyConfig, SudoConfig, SystemConfig, - TechnicalCommitteeConfig, wasm_binary_unwrap, + TechnicalCommitteeConfig, wasm_binary_unwrap, MAX_NOMINATIONS, }; use node_runtime::Block; use node_runtime::constants::currency::*; @@ -146,12 +146,7 @@ fn staging_testnet_config_genesis() -> GenesisConfig { let endowed_accounts: Vec = vec![root_key.clone()]; - testnet_genesis( - initial_authorities, - root_key, - Some(endowed_accounts), - false, - ) + testnet_genesis(initial_authorities, vec![], root_key, Some(endowed_accounts), false) } /// Staging testnet config. @@ -214,6 +209,7 @@ pub fn testnet_genesis( ImOnlineId, AuthorityDiscoveryId, )>, + initial_nominators: Vec, root_key: AccountId, endowed_accounts: Option>, enable_println: bool, @@ -234,11 +230,31 @@ pub fn testnet_genesis( get_account_id_from_seed::("Ferdie//stash"), ] }); - initial_authorities.iter().for_each(|x| - if !endowed_accounts.contains(&x.0) { - endowed_accounts.push(x.0.clone()) + // endow all authorities and nominators. 
+ initial_authorities.iter().map(|x| &x.0).chain(initial_nominators.iter()).for_each(|x| { + if !endowed_accounts.contains(&x) { + endowed_accounts.push(x.clone()) } - ); + }); + + // stakers: all validators and nominators. + let mut rng = rand::thread_rng(); + let stakers = initial_authorities + .iter() + .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) + .chain(initial_nominators.iter().map(|x| { + use rand::{seq::SliceRandom, Rng}; + let limit = (MAX_NOMINATIONS as usize).min(initial_authorities.len()); + let count = rng.gen::() % limit; + let nominations = initial_authorities + .as_slice() + .choose_multiple(&mut rng, count) + .into_iter() + .map(|choice| choice.0.clone()) + .collect::>(); + (x.clone(), x.clone(), STASH, StakerStatus::Nominator(nominations)) + })) + .collect::>(); let num_endowed_accounts = endowed_accounts.len(); @@ -271,11 +287,9 @@ pub fn testnet_genesis( pallet_staking: StakingConfig { validator_count: initial_authorities.len() as u32 * 2, minimum_validator_count: initial_authorities.len() as u32, - stakers: initial_authorities.iter().map(|x| { - (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator) - }).collect(), invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), slash_reward_fraction: Perbill::from_percent(10), + stakers, .. 
Default::default() }, pallet_democracy: DemocracyConfig::default(), @@ -334,6 +348,7 @@ fn development_config_genesis() -> GenesisConfig { vec![ authority_keys_from_seed("Alice"), ], + vec![], get_account_id_from_seed::("Alice"), None, true, @@ -361,6 +376,7 @@ fn local_testnet_genesis() -> GenesisConfig { authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob"), ], + vec![], get_account_id_from_seed::("Alice"), None, false, @@ -394,6 +410,7 @@ pub(crate) mod tests { vec![ authority_keys_from_seed("Alice"), ], + vec![], get_account_id_from_seed::("Alice"), None, false, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 1351782315be7..b00451267d96c 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -218,6 +218,7 @@ pub fn new_full_base( } = new_partial(&config)?; let shared_voter_state = rpc_setup; + let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); @@ -320,7 +321,11 @@ pub fn new_full_base( Event::Dht(e) => Some(e), _ => None, }}); - let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service( + let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service_with_config( + sc_authority_discovery::WorkerConfig { + publish_non_global_ips: auth_disc_publish_non_global_ips, + ..Default::default() + }, client.clone(), network.clone(), Box::pin(dht_event_stream), @@ -534,7 +539,7 @@ pub fn new_light( #[cfg(test)] mod tests { - use std::{sync::Arc, borrow::Cow, any::Any, convert::TryInto}; + use std::{sync::Arc, borrow::Cow, convert::TryInto}; use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; use sc_consensus_epochs::descendent_query; use sp_consensus::{ @@ -638,27 +643,34 @@ mod tests { None, ); - let epoch_descriptor = babe_link.epoch_changes().lock().epoch_descriptor_for_child_of( - 
descendent_query(&*service.client()), - &parent_hash, - parent_number, - slot.into(), - ).unwrap().unwrap(); - let mut digest = Digest::::default(); // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { - inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot * SLOT_DURATION)); - if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( + let (babe_pre_digest, epoch_descriptor) = loop { + inherent_data.replace_data( + sp_timestamp::INHERENT_IDENTIFIER, + &(slot * SLOT_DURATION), + ); + + let epoch_descriptor = babe_link.epoch_changes().shared_data().epoch_descriptor_for_child_of( + descendent_query(&*service.client()), + &parent_hash, + parent_number, + slot.into(), + ).unwrap().unwrap(); + + let epoch = babe_link.epoch_changes().shared_data().epoch_data( + &epoch_descriptor, + |slot| sc_consensus_babe::Epoch::genesis(&babe_link.config(), slot), + ).unwrap(); + + if let Some(babe_pre_digest) = sc_consensus_babe::authorship::claim_slot( slot.into(), - &parent_header, - &*service.client(), - keystore.clone(), - &babe_link, - ) { - break babe_pre_digest; + &epoch, + &keystore, + ).map(|(digest, _)| digest) { + break (babe_pre_digest, epoch_descriptor) } slot += 1; @@ -672,6 +684,7 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), + None, ).await }).expect("Error making test block").block; @@ -696,11 +709,11 @@ mod tests { params.body = Some(new_body); params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - block_import.import_block(params, Default::default()) + futures::executor::block_on(block_import.import_block(params, Default::default())) .expect("error importing test block"); }, |service, _| { diff --git 
a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index fb7fc9191141c..54a44d59c2591 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -44,6 +44,7 @@ sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } sp-externalities = { version = "0.9.0", path = "../../../primitives/externalities" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } wat = "1.0" +futures = "0.3.9" [features] wasmtime = [ diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 769b259461342..36925b38c0729 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -841,5 +841,5 @@ fn should_import_block_with_test_client() { let block_data = block1.0; let block = node_primitives::Block::decode(&mut &block_data[..]).unwrap(); - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index ad24db03f9832..5474adbd32a89 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -21,7 +21,7 @@ use frame_support::{ weights::{GetDispatchInfo, constants::ExtrinsicBaseWeight, IdentityFee, WeightToFeePolynomial}, }; use sp_core::NeverNativeValue; -use sp_runtime::{Perbill, FixedPointNumber}; +use sp_runtime::{Perbill, traits::One}; use node_runtime::{ CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, Multiplier, TransactionByteFee, diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 1689d0e8247f8..f28aedf7f5a08 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -13,7 +13,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] jsonrpc-core = "15.1.0" node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } pallet-contracts-rpc = { version = "3.0.0", path = 
"../../../frame/contracts/rpc/" } pallet-transaction-payment-rpc = { version = "3.0.0", path = "../../../frame/transaction-payment/rpc/" } sc-client-api = { version = "3.0.0", path = "../../../client/api" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index e68ca6843bc94..019e548d82787 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -115,7 +115,7 @@ pub fn create_full( C: ProvideRuntimeApi + HeaderBackend + AuxStore + HeaderMetadata + Sync + Send + 'static, C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, + C::Api: pallet_contracts_rpc::ContractsRuntimeApi, C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, C::Api: BabeApi, C::Api: BlockBuilder, diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 4208da4e9323e..d67f5754df057 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -15,7 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] # third-party dependencies codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" hex-literal = { version = "0.3.1", optional = true } log = { version = "0.4.14", default-features = false } @@ -58,7 +57,7 @@ pallet-contracts-primitives = { version = "3.0.0", default-features = false, pat pallet-contracts-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } pallet-democracy = { version = "3.0.0", default-features = false, path = "../../../frame/democracy" } pallet-election-provider-multi-phase = { version = "3.0.0", default-features = false, path = "../../../frame/election-provider-multi-phase" } -pallet-elections-phragmen = { version = "3.0.0", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-elections-phragmen = { version = "4.0.0", default-features = 
false, path = "../../../frame/elections-phragmen" } pallet-grandpa = { version = "3.0.0", default-features = false, path = "../../../frame/grandpa" } pallet-im-online = { version = "3.0.0", default-features = false, path = "../../../frame/im-online" } pallet-indices = { version = "3.0.0", default-features = false, path = "../../../frame/indices" } @@ -133,7 +132,6 @@ std = [ "sp-io/std", "pallet-randomness-collective-flip/std", "sp-std/std", - "serde", "pallet-session/std", "sp-api/std", "sp-runtime/std", @@ -180,6 +178,7 @@ runtime-benchmarks = [ "pallet-identity/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", "pallet-indices/runtime-benchmarks", + "pallet-membership/runtime-benchmarks", "pallet-multisig/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", "pallet-scheduler/runtime-benchmarks", diff --git a/bin/node/runtime/src/constants.rs b/bin/node/runtime/src/constants.rs index 840324f94fbd7..fb7890c7f77fd 100644 --- a/bin/node/runtime/src/constants.rs +++ b/bin/node/runtime/src/constants.rs @@ -54,6 +54,8 @@ pub mod time { pub const MILLISECS_PER_BLOCK: Moment = 6000; pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; + // NOTE: Currently it is not possible to change the slot duration after the chain has started. + // Attempting to do so will brick block production. pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. 
diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index e4bb4cdf95d2d..18170eb62e267 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -29,7 +29,7 @@ impl OnUnbalanced for Author { #[cfg(test)] mod multiplier_tests { - use sp_runtime::{assert_eq_error_rate, FixedPointNumber, traits::Convert}; + use sp_runtime::{assert_eq_error_rate, FixedPointNumber, traits::{Convert, One, Zero}}; use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment}; use crate::{ @@ -74,7 +74,7 @@ mod multiplier_tests { let m = max_normal() as f64; // block weight always truncated to max weight let block_weight = (block_weight as f64).min(m); - let v: f64 = AdjustmentVariable::get().to_fraction(); + let v: f64 = AdjustmentVariable::get().to_float(); // Ideal saturation in terms of weight let ss = target() as f64; diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 12bd2e03a720b..feffa855bae71 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -40,7 +40,7 @@ use frame_system::{ EnsureRoot, EnsureOneOf, limits::{BlockWeights, BlockLength} }; -use frame_support::traits::InstanceFilter; +use frame_support::{traits::InstanceFilter, PalletId}; use codec::{Encode, Decode}; use sp_core::{ crypto::KeyTypeId, @@ -55,7 +55,7 @@ use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; use sp_api::impl_runtime_apis; use sp_runtime::{ Permill, Perbill, Perquintill, Percent, ApplyExtrinsicResult, impl_opaque_keys, generic, - create_runtime_str, ModuleId, FixedPointNumber, + create_runtime_str, FixedPointNumber, }; use sp_runtime::curve::PiecewiseLinear; use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; @@ -119,8 +119,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. 
If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 285, - impl_version: 0, + spec_version: 286, + impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 2, }; @@ -219,6 +219,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = frame_system::weights::SubstrateWeight; type SS58Prefix = SS58Prefix; + type OnSetCode = (); } impl pallet_utility::Config for Runtime { @@ -331,6 +332,8 @@ impl pallet_scheduler::Config for Runtime { } parameter_types! { + // NOTE: Currently it is not possible to change the epoch duration after the chain has started. + // Attempting to do so will brick block production. pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; pub const ReportLongevity: u64 = @@ -477,8 +480,7 @@ parameter_types! { } impl pallet_staking::Config for Runtime { - const MAX_NOMINATIONS: u32 = - ::LIMIT as u32; + const MAX_NOMINATIONS: u32 = MAX_NOMINATIONS; type Currency = Balances; type UnixTime = Timestamp; type CurrencyToVote = U128CurrencyToVote; @@ -521,14 +523,25 @@ parameter_types! 
{ .get(DispatchClass::Normal) .max_extrinsic.expect("Normal extrinsics have a weight limit configured; qed") .saturating_sub(BlockExecutionWeight::get()); + // Solution can occupy 90% of normal block size + pub MinerMaxLength: u32 = Perbill::from_rational(9u32, 10) * + *RuntimeBlockLength::get() + .max + .get(DispatchClass::Normal); } sp_npos_elections::generate_solution_type!( #[compact] - pub struct NposCompactSolution16::(16) - // -------------------- ^^ + pub struct NposCompactSolution16::< + VoterIndex = u32, + TargetIndex = u16, + Accuracy = sp_runtime::PerU16, + >(16) ); +pub const MAX_NOMINATIONS: u32 = + ::LIMIT as u32; + impl pallet_election_provider_multi_phase::Config for Runtime { type Event = Event; type Currency = Balances; @@ -537,6 +550,7 @@ impl pallet_election_provider_multi_phase::Config for Runtime { type SolutionImprovementThreshold = SolutionImprovementThreshold; type MinerMaxIterations = MinerMaxIterations; type MinerMaxWeight = MinerMaxWeight; + type MinerMaxLength = MinerMaxLength; type MinerTxPriority = MultiPhaseUnsignedPriority; type DataProvider = Staking; type OnChainAccuracy = Perbill; @@ -631,7 +645,7 @@ parameter_types! { pub const TermDuration: BlockNumber = 7 * DAYS; pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 20; - pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; + pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } // Make sure that there are no more than `MaxMembers` members elected via elections-phragmen. 
@@ -639,7 +653,7 @@ const_assert!(DesiredMembers::get() <= CouncilMaxMembers::get()); impl pallet_elections_phragmen::Config for Runtime { type Event = Event; - type ModuleId = ElectionsPhragmenModuleId; + type PalletId = ElectionsPhragmenPalletId; type Currency = Balances; type ChangeMembers = Council; // NOTE: this implies that council's genesis members cannot be set directly and must come from @@ -689,6 +703,8 @@ impl pallet_membership::Config for Runtime { type PrimeOrigin = EnsureRootOrHalfCouncil; type MembershipInitialized = TechnicalCommittee; type MembershipChanged = TechnicalCommittee; + type MaxMembers = TechnicalMaxMembers; + type WeightInfo = pallet_membership::weights::SubstrateWeight; } parameter_types! { @@ -702,15 +718,16 @@ parameter_types! { pub const DataDepositPerByte: Balance = 1 * CENTS; pub const BountyDepositBase: Balance = 1 * DOLLARS; pub const BountyDepositPayoutDelay: BlockNumber = 8 * DAYS; - pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const BountyUpdatePeriod: BlockNumber = 90 * DAYS; pub const MaximumReasonLength: u32 = 16384; pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: Balance = 10 * DOLLARS; + pub const MaxApprovals: u32 = 100; } impl pallet_treasury::Config for Runtime { - type ModuleId = TreasuryModuleId; + type PalletId = TreasuryPalletId; type Currency = Balances; type ApproveOrigin = EnsureOneOf< AccountId, @@ -731,6 +748,7 @@ impl pallet_treasury::Config for Runtime { type BurnDestination = (); type SpendFunds = Bounties; type WeightInfo = pallet_treasury::weights::SubstrateWeight; + type MaxApprovals = MaxApprovals; } impl pallet_bounties::Config for Runtime { @@ -968,12 +986,12 @@ parameter_types! 
{ pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; pub const ChallengePeriod: BlockNumber = 7 * DAYS; pub const MaxCandidateIntake: u32 = 10; - pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); + pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); } impl pallet_society::Config for Runtime { type Event = Event; - type ModuleId = SocietyModuleId; + type PalletId = SocietyPalletId; type Currency = Balances; type Randomness = RandomnessCollectiveFlip; type CandidateDeposit = CandidateDeposit; @@ -1308,7 +1326,9 @@ impl_runtime_apis! { } } - impl pallet_contracts_rpc_runtime_api::ContractsApi + impl pallet_contracts_rpc_runtime_api::ContractsApi< + Block, AccountId, Balance, BlockNumber, Hash, + > for Runtime { fn call( @@ -1321,6 +1341,18 @@ impl_runtime_apis! { Contracts::bare_call(origin, dest, value, gas_limit, input_data) } + fn instantiate( + origin: AccountId, + endowment: Balance, + gas_limit: u64, + code: pallet_contracts_primitives::Code, + data: Vec, + salt: Vec, + ) -> pallet_contracts_primitives::ContractInstantiateResult + { + Contracts::bare_instantiate(origin, endowment, gas_limit, code, data, salt, true) + } + fn get_storage( address: AccountId, key: [u8; 32], @@ -1414,6 +1446,7 @@ impl_runtime_apis! 
{ add_benchmark!(params, batches, pallet_identity, Identity); add_benchmark!(params, batches, pallet_im_online, ImOnline); add_benchmark!(params, batches, pallet_indices, Indices); + add_benchmark!(params, batches, pallet_membership, TechnicalMembership); add_benchmark!(params, batches, pallet_multisig, Multisig); add_benchmark!(params, batches, pallet_offences, OffencesBench::); add_benchmark!(params, batches, pallet_proxy, Proxy); diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml index f94575e8e621b..9d810ddbcfde0 100644 --- a/bin/node/test-runner-example/Cargo.toml +++ b/bin/node/test-runner-example/Cargo.toml @@ -36,5 +36,4 @@ sp-api = { version = "3.0.0", path = "../../../primitives/api" } sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } -rand = "0.8.3" log = "0.4.14" diff --git a/bin/node/test-runner-example/src/lib.rs b/bin/node/test-runner-example/src/lib.rs index 22cfffa7f23a7..ac589437248ee 100644 --- a/bin/node/test-runner-example/src/lib.rs +++ b/bin/node/test-runner-example/src/lib.rs @@ -18,9 +18,9 @@ //! Basic example of end to end runtime tests. 
-use test_runner::{Node, ChainInfo, SignatureVerificationOverride}; +use test_runner::{Node, ChainInfo, SignatureVerificationOverride, default_config}; use grandpa::GrandpaBlockImport; -use sc_service::{TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts}; +use sc_service::{TFullBackend, TFullClient, Configuration, TaskManager, new_full_parts, TaskExecutor}; use std::sync::Arc; use sp_inherents::InherentDataProviders; use sc_consensus_babe::BabeBlockImport; @@ -29,6 +29,7 @@ use sp_keyring::sr25519::Keyring::Alice; use sp_consensus_babe::AuthorityId; use sc_consensus_manual_seal::{ConsensusDataProvider, consensus::babe::BabeConsensusDataProvider}; use sp_runtime::{traits::IdentifyAccount, MultiSigner, generic::Era}; +use node_cli::chain_spec::development_config; type BlockImport = BabeBlockImport>; @@ -71,6 +72,10 @@ impl ChainInfo for NodeTemplateChainInfo { ) } + fn config(task_executor: TaskExecutor) -> Configuration { + default_config(task_executor, Box::new(development_config())) + } + fn create_client_parts( config: &Configuration, ) -> Result< @@ -151,20 +156,10 @@ mod tests { use super::*; use test_runner::NodeConfig; use log::LevelFilter; - use sc_client_api::execution_extensions::ExecutionStrategies; - use node_cli::chain_spec::development_config; #[test] fn test_runner() { let config = NodeConfig { - execution_strategies: ExecutionStrategies { - syncing: sc_client_api::ExecutionStrategy::AlwaysWasm, - importing: sc_client_api::ExecutionStrategy::AlwaysWasm, - block_construction: sc_client_api::ExecutionStrategy::AlwaysWasm, - offchain_worker: sc_client_api::ExecutionStrategy::AlwaysWasm, - other: sc_client_api::ExecutionStrategy::AlwaysWasm, - }, - chain_spec: Box::new(development_config()), log_targets: vec![ ("yamux", LevelFilter::Off), ("multistream_select", LevelFilter::Off), diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index cc6d7587dd517..edb99c617771a 100644 --- a/bin/node/testing/src/bench.rs +++ 
b/bin/node/testing/src/bench.rs @@ -691,7 +691,7 @@ impl BenchContext { assert_eq!(self.client.chain_info().best_number, 0); assert_eq!( - self.client.import_block(import_params, Default::default()) + futures::executor::block_on(self.client.import_block(import_params, Default::default())) .expect("Failed to import block"), ImportResult::Imported( ImportedAux { diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index f3336b1d53a84..2aaef7c96d9ab 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -41,6 +41,10 @@ enum ChainSpecBuilder { /// Authority key seed. #[structopt(long, short, required = true)] authority_seeds: Vec, + /// Active nominators (SS58 format), each backing a random subset of the aforementioned + /// authorities. + #[structopt(long, short, default_value = "0")] + nominator_accounts: Vec, /// Endowed account address (SS58 format). #[structopt(long, short)] endowed_accounts: Vec, @@ -57,6 +61,11 @@ enum ChainSpecBuilder { /// The number of authorities. #[structopt(long, short)] authorities: usize, + /// The number of nominators backing the aforementioned authorities. + /// + /// Will nominate a random subset of `authorities`. + #[structopt(long, short, default_value = "0")] + nominators: usize, /// The number of endowed accounts. 
#[structopt(long, short, default_value = "0")] endowed: usize, @@ -87,6 +96,7 @@ impl ChainSpecBuilder { fn genesis_constructor( authority_seeds: &[String], + nominator_accounts: &[AccountId], endowed_accounts: &[AccountId], sudo_account: &AccountId, ) -> chain_spec::GenesisConfig { @@ -100,6 +110,7 @@ fn genesis_constructor( chain_spec::testnet_genesis( authorities, + nominator_accounts.to_vec(), sudo_account.clone(), Some(endowed_accounts.to_vec()), enable_println, @@ -108,26 +119,28 @@ fn genesis_constructor( fn generate_chain_spec( authority_seeds: Vec, + nominator_accounts: Vec, endowed_accounts: Vec, sudo_account: String, ) -> Result { - let parse_account = |address: &String| { - AccountId::from_string(address) + let parse_account = |address: String| { + AccountId::from_string(&address) .map_err(|err| format!("Failed to parse account address: {:?}", err)) }; - let endowed_accounts = endowed_accounts - .iter() - .map(parse_account) - .collect::, String>>()?; + let nominator_accounts = + nominator_accounts.into_iter().map(parse_account).collect::, String>>()?; + + let endowed_accounts = + endowed_accounts.into_iter().map(parse_account).collect::, String>>()?; - let sudo_account = parse_account(&sudo_account)?; + let sudo_account = parse_account(sudo_account)?; let chain_spec = chain_spec::ChainSpec::from_genesis( "Custom", "custom", sc_chain_spec::ChainType::Live, - move || genesis_constructor(&authority_seeds, &endowed_accounts, &sudo_account), + move || genesis_constructor(&authority_seeds, &nominator_accounts, &endowed_accounts, &sudo_account), vec![], None, None, @@ -186,6 +199,7 @@ fn generate_authority_keys_and_store( fn print_seeds( authority_seeds: &[String], + nominator_seeds: &[String], endowed_seeds: &[String], sudo_seed: &str, ) { @@ -201,6 +215,12 @@ fn print_seeds( ); } + println!("{}", header.paint("Nominator seeds")); + + for (n, seed) in nominator_seeds.iter().enumerate() { + println!("{} //{}", entry.paint(format!("nom-{}:", n)), seed); + } + 
println!(); if !endowed_seeds.is_empty() { @@ -220,34 +240,27 @@ fn print_seeds( } fn main() -> Result<(), String> { - #[cfg(build_type="debug")] + #[cfg(build_type = "debug")] println!( - "The chain spec builder builds a chain specification that includes a Substrate runtime compiled as WASM. To \ - ensure proper functioning of the included runtime compile (or run) the chain spec builder binary in \ - `--release` mode.\n", + "The chain spec builder builds a chain specification that includes a Substrate runtime \ + compiled as WASM. To ensure proper functioning of the included runtime compile (or run) \ + the chain spec builder binary in `--release` mode.\n", ); let builder = ChainSpecBuilder::from_args(); let chain_spec_path = builder.chain_spec_path().to_path_buf(); - let (authority_seeds, endowed_accounts, sudo_account) = match builder { - ChainSpecBuilder::Generate { authorities, endowed, keystore_path, .. } => { + let (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) = match builder { + ChainSpecBuilder::Generate { authorities, nominators, endowed, keystore_path, .. 
} => { let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric) - .take(32) - .collect() - }; + let rand_str = || -> String { OsRng.sample_iter(&Alphanumeric).take(32).collect() }; let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); + let nominator_seeds = (0..nominators).map(|_| rand_str()).collect::>(); let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); let sudo_seed = rand_str(); - print_seeds( - &authority_seeds, - &endowed_seeds, - &sudo_seed, - ); + print_seeds(&authority_seeds, &nominator_seeds, &endowed_seeds, &sudo_seed); if let Some(keystore_path) = keystore_path { generate_authority_keys_and_store( @@ -256,23 +269,37 @@ fn main() -> Result<(), String> { )?; } - let endowed_accounts = endowed_seeds.iter().map(|seed| { - chain_spec::get_account_id_from_seed::(seed) - .to_ss58check() - }).collect(); + let nominator_accounts = nominator_seeds + .into_iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(&seed).to_ss58check() + }) + .collect(); - let sudo_account = chain_spec::get_account_id_from_seed::(&sudo_seed) - .to_ss58check(); + let endowed_accounts = endowed_seeds + .into_iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(&seed).to_ss58check() + }) + .collect(); - (authority_seeds, endowed_accounts, sudo_account) - }, - ChainSpecBuilder::New { authority_seeds, endowed_accounts, sudo_account, .. } => { - (authority_seeds, endowed_accounts, sudo_account) - }, + let sudo_account = + chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); + + (authority_seeds, nominator_accounts, endowed_accounts, sudo_account) + } + ChainSpecBuilder::New { + authority_seeds, + nominator_accounts, + endowed_accounts, + sudo_account, + .. 
+ } => (authority_seeds, nominator_accounts, endowed_accounts, sudo_account), }; let json = generate_chain_spec( authority_seeds, + nominator_accounts, endowed_accounts, sudo_account, )?; diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs index 8fec00403bde1..96a5a272916e5 100644 --- a/client/api/src/cht.rs +++ b/client/api/src/cht.rs @@ -218,12 +218,9 @@ pub fn for_each_cht_group( let mut current_cht_num = None; let mut current_cht_blocks = Vec::new(); for block in blocks { - let new_cht_num = match block_to_cht_number(cht_size, block) { - Some(new_cht_num) => new_cht_num, - None => return Err(ClientError::Backend(format!( - "Cannot compute CHT root for the block #{}", block)).into() - ), - }; + let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| ClientError::Backend(format!( + "Cannot compute CHT root for the block #{}", block)) + )?; let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); if advance_to_next_cht { diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 930ae39c4b523..409b5f52b5d3c 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -226,10 +226,8 @@ impl Blockchain { /// Set an existing block as head. pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { - let header = match self.header(id)? { - Some(h) => h, - None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), - }; + let header = self.header(id)? 
+ .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))?; self.apply_head(&header) } @@ -760,10 +758,8 @@ impl backend::Backend for Backend where Block::Hash _ => {}, } - match self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) { - Some(state) => Ok(state), - None => Err(sp_blockchain::Error::UnknownBlock(format!("{}", block))), - } + self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) + .ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", block))) } fn revert( diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index bfd419ec9a581..b043a332d667d 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -51,19 +51,18 @@ impl StorageChangeSet { .map(move |(k,v)| (None, k, v.as_ref())); let children = self.child_changes .iter() - .filter_map(move |(sk, changes)| { - if let Some(cf) = self.child_filters.as_ref() { - if let Some(filter) = cf.get(sk) { - Some(changes + .filter_map(move |(sk, changes)| + self.child_filters.as_ref().and_then(|cf| + cf.get(sk).map(|filter| changes .iter() .filter(move |&(key, _)| match filter { Some(ref filter) => filter.contains(key), None => true, }) - .map(move |(k,v)| (Some(sk), k, v.as_ref()))) - } else { None } - } else { None } - }) + .map(move |(k,v)| (Some(sk), k, v.as_ref())) + ) + ) + ) .flatten(); top.chain(children) } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 4a92186c444b7..5b5baa999c8b3 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -23,7 +23,8 @@ derive_more = "0.99.2" either = "1.5.3" futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.36.0", default-features = false, features = ["kad"] } +ip_network = "0.3.4" +libp2p = { version = "0.37.1", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path 
= "../../utils/prometheus", version = "0.9.0"} prost = "0.7" diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 818eb1beb3ffe..ab6338963da46 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -53,10 +53,23 @@ pub struct WorkerConfig { /// /// By default this is set to 1 hour. pub max_publish_interval: Duration, + /// Interval at which the keystore is queried. If the keys have changed, unconditionally + /// re-publish its addresses on the DHT. + /// + /// By default this is set to 1 minute. + pub keystore_refresh_interval: Duration, /// The maximum interval in which the node will query the DHT for new entries. /// /// By default this is set to 10 minutes. pub max_query_interval: Duration, + + /// If `false`, the node won't publish on the DHT multiaddresses that contain non-global + /// IP addresses (such as 10.0.0.1). + /// + /// Recommended: `false` for live chains, and `true` for local chains or for testing. + /// + /// Defaults to `true` to avoid the surprise factor. + pub publish_non_global_ips: bool, } impl Default for WorkerConfig { @@ -67,6 +80,7 @@ impl Default for WorkerConfig { // not depend on the republishing process, thus publishing own external addresses should // happen on an interval < 36h. max_publish_interval: Duration::from_secs(1 * 60 * 60), + keystore_refresh_interval: Duration::from_secs(60), // External addresses of remote authorities can change at any given point in time. The // interval on which to trigger new queries for the current and next authorities is a trade // off between efficiency and performance. @@ -75,6 +89,7 @@ impl Default for WorkerConfig { // comparing `authority_discovery_authority_addresses_requested_total` and // `authority_discovery_dht_event_received`. 
max_query_interval: Duration::from_secs(10 * 60), + publish_non_global_ips: true, } } } diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index b1fb89669bf2e..3b76215dc24c5 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -25,11 +25,12 @@ use std::sync::Arc; use std::time::Duration; use futures::channel::mpsc; -use futures::{FutureExt, Stream, StreamExt, stream::Fuse}; +use futures::{future, FutureExt, Stream, StreamExt, stream::Fuse}; use addr_cache::AddrCache; use async_trait::async_trait; use codec::Decode; +use ip_network::IpNetwork; use libp2p::{core::multiaddr, multihash::{Multihash, Hasher}}; use log::{debug, error, log_enabled}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; @@ -44,7 +45,7 @@ use sc_network::{ PeerId, }; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; -use sp_core::crypto::{key_types, Pair}; +use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; use sp_keystore::CryptoStore; use sp_runtime::{traits::Block as BlockT, generic::BlockId}; use sp_api::ProvideRuntimeApi; @@ -109,6 +110,15 @@ pub struct Worker { /// Interval to be proactive, publishing own addresses. publish_interval: ExpIncInterval, + /// Pro-actively publish our own addresses at this interval, if the keys in the keystore + /// have changed. + publish_if_changed_interval: ExpIncInterval, + /// List of keys onto which addresses have been published at the latest publication. + /// Used to check whether they have changed. + latest_published_keys: HashSet, + /// Same value as in the configuration. + publish_non_global_ips: bool, + /// Interval at which to request addresses of authorities, refilling the pending lookups queue. 
query_interval: ExpIncInterval, @@ -160,6 +170,13 @@ where config.max_query_interval, ); + // An `ExpIncInterval` is overkill here because the interval is constant, but consistency + // is more simple. + let publish_if_changed_interval = ExpIncInterval::new( + config.keystore_refresh_interval, + config.keystore_refresh_interval + ); + let addr_cache = AddrCache::new(); let metrics = match prometheus_registry { @@ -181,6 +198,9 @@ where network, dht_event_rx, publish_interval, + publish_if_changed_interval, + latest_published_keys: HashSet::new(), + publish_non_global_ips: config.publish_non_global_ips, query_interval, pending_lookups: Vec::new(), in_flight_lookups: HashMap::new(), @@ -212,8 +232,11 @@ where self.process_message_from_service(msg); }, // Publish own addresses. - _ = self.publish_interval.next().fuse() => { - if let Err(e) = self.publish_ext_addresses().await { + only_if_changed = future::select( + self.publish_interval.next().map(|_| false), + self.publish_if_changed_interval.next().map(|_| true) + ).map(|e| e.factor_first().0).fuse() => { + if let Err(e) = self.publish_ext_addresses(only_if_changed).await { error!( target: LOG_TARGET, "Failed to publish external addresses: {:?}", e, @@ -248,10 +271,24 @@ where } } - fn addresses_to_publish(&self) -> impl ExactSizeIterator { + fn addresses_to_publish(&self) -> impl Iterator { let peer_id: Multihash = self.network.local_peer_id().into(); + let publish_non_global_ips = self.publish_non_global_ips; self.network.external_addresses() .into_iter() + .filter(move |a| { + if publish_non_global_ips { + return true; + } + + a.iter().all(|p| match p { + // The `ip_network` library is used because its `is_global()` method is stable, + // while `is_global()` in the standard library currently isn't. 
+ multiaddr::Protocol::Ip4(ip) if !IpNetwork::from(ip).is_global() => false, + multiaddr::Protocol::Ip6(ip) if !IpNetwork::from(ip).is_global() => false, + _ => true, + }) + }) .map(move |a| { if a.iter().any(|p| matches!(p, multiaddr::Protocol::P2p(_))) { a @@ -262,13 +299,25 @@ where } /// Publish own public addresses. - async fn publish_ext_addresses(&mut self) -> Result<()> { + /// + /// If `only_if_changed` is true, the function has no effect if the list of keys to publish + /// is equal to `self.latest_published_keys`. + async fn publish_ext_addresses(&mut self, only_if_changed: bool) -> Result<()> { let key_store = match &self.role { Role::PublishAndDiscover(key_store) => key_store, Role::Discover => return Ok(()), }; - let addresses = self.addresses_to_publish(); + let keys = Worker::::get_own_public_keys_within_authority_set( + key_store.clone(), + self.client.as_ref(), + ).await?.into_iter().map(Into::into).collect::>(); + + if only_if_changed && keys == self.latest_published_keys { + return Ok(()) + } + + let addresses = self.addresses_to_publish().map(|a| a.to_vec()).collect::>(); if let Some(metrics) = &self.metrics { metrics.publish.inc(); @@ -278,22 +327,18 @@ where } let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses: addresses.map(|a| a.to_vec()).collect() } + schema::AuthorityAddresses { addresses } .encode(&mut serialized_addresses) .map_err(Error::EncodingProto)?; - let keys = Worker::::get_own_public_keys_within_authority_set( - key_store.clone(), - self.client.as_ref(), - ).await?.into_iter().map(Into::into).collect::>(); - + let keys_vec = keys.iter().cloned().collect::>(); let signatures = key_store.sign_with_all( key_types::AUTHORITY_DISCOVERY, - keys.clone(), + keys_vec.clone(), serialized_addresses.as_slice(), ).await.map_err(|_| Error::Signing)?; - for (sign_result, key) in signatures.into_iter().zip(keys) { + for (sign_result, key) in signatures.into_iter().zip(keys_vec.iter()) { let mut signed_addresses = 
vec![]; // Verify that all signatures exist for all provided keys. @@ -313,6 +358,8 @@ where ); } + self.latest_published_keys = keys; + Ok(()) } diff --git a/client/authority-discovery/src/worker/addr_cache.rs b/client/authority-discovery/src/worker/addr_cache.rs index 13b259fbbb10d..7cefff1aaff07 100644 --- a/client/authority-discovery/src/worker/addr_cache.rs +++ b/client/authority-discovery/src/worker/addr_cache.rs @@ -24,6 +24,9 @@ use sc_network::PeerId; /// Cache for [`AuthorityId`] -> [`Vec`] and [`PeerId`] -> [`AuthorityId`] mappings. pub(super) struct AddrCache { + // The addresses found in `authority_id_to_addresses` are guaranteed to always match + // the peerids found in `peer_id_to_authority_id`. In other words, these two hashmaps + // are similar to a bi-directional map. authority_id_to_addresses: HashMap>, peer_id_to_authority_id: HashMap, } @@ -43,17 +46,44 @@ impl AddrCache { return; } + addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + // Insert into `self.peer_id_to_authority_id`. let peer_ids = addresses.iter() .map(|a| peer_id_from_multiaddr(a)) .filter_map(|peer_id| peer_id); - for peer_id in peer_ids { - self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()); + for peer_id in peer_ids.clone() { + let former_auth = match self.peer_id_to_authority_id.insert(peer_id, authority_id.clone()) { + Some(a) if a != authority_id => a, + _ => continue, + }; + + // PeerId was associated to a different authority id before. + // Remove corresponding authority from `self.authority_id_to_addresses`. + let former_auth_addrs = match self.authority_id_to_addresses.get_mut(&former_auth) { + Some(a) => a, + None => { debug_assert!(false); continue } + }; + former_auth_addrs.retain(|a| peer_id_from_multiaddr(a).map_or(true, |p| p != peer_id)); } // Insert into `self.authority_id_to_addresses`. 
- addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - self.authority_id_to_addresses.insert(authority_id, addresses); + for former_addr in + self.authority_id_to_addresses.insert(authority_id.clone(), addresses.clone()).unwrap_or_default() + { + // Must remove from `self.peer_id_to_authority_id` any PeerId formerly associated + // to that authority but that can't be found in its new addresses. + + let peer_id = match peer_id_from_multiaddr(&former_addr) { + Some(p) => p, + None => continue, + }; + + if !peer_ids.clone().any(|p| p == peer_id) { + let _old_auth = self.peer_id_to_authority_id.remove(&peer_id); + debug_assert!(_old_auth.is_some()); + } + } } /// Returns the number of authority IDs in the cache. @@ -144,6 +174,25 @@ mod tests { } } + #[derive(Clone, Debug)] + struct TestMultiaddrsSamePeerCombo(Multiaddr, Multiaddr); + + impl Arbitrary for TestMultiaddrsSamePeerCombo { + fn arbitrary(g: &mut Gen) -> Self { + let seed = (0..32).map(|_| u8::arbitrary(g)).collect::>(); + let peer_id = PeerId::from_multihash( + Multihash::wrap(multihash::Code::Sha2_256.into(), &seed).unwrap() + ).unwrap(); + let multiaddr1 = "/ip6/2001:db8:0:0:0:0:0:2/tcp/30333".parse::() + .unwrap() + .with(Protocol::P2p(peer_id.clone().into())); + let multiaddr2 = "/ip6/2002:db8:0:0:0:0:0:2/tcp/30133".parse::() + .unwrap() + .with(Protocol::P2p(peer_id.into())); + TestMultiaddrsSamePeerCombo(multiaddr1, multiaddr2) + } + } + #[test] fn retains_only_entries_of_provided_authority_ids() { fn property( @@ -190,4 +239,73 @@ mod tests { .max_tests(10) .quickcheck(property as fn(_, _, _) -> TestResult) } + + #[test] + fn keeps_consistency_between_authority_id_and_peer_id() { + fn property( + authority1: TestAuthorityId, + authority2: TestAuthorityId, + multiaddr1: TestMultiaddr, + multiaddr2: TestMultiaddr, + multiaddr3: TestMultiaddrsSamePeerCombo, + ) -> TestResult { + let authority1 = authority1.0; + let authority2 = authority2.0; + let multiaddr1 = multiaddr1.0; + let multiaddr2 
= multiaddr2.0; + let TestMultiaddrsSamePeerCombo(multiaddr3, multiaddr4) = multiaddr3; + + let mut cache = AddrCache::new(); + + cache.insert(authority1.clone(), vec![multiaddr1.clone()]); + cache.insert(authority1.clone(), vec![multiaddr2.clone(), multiaddr3.clone(), multiaddr4.clone()]); + + assert_eq!( + None, + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr1).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr4).unwrap()) + ); + + cache.insert(authority2.clone(), vec![multiaddr2.clone()]); + + assert_eq!( + Some(&authority2), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&authority1), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + ); + assert_eq!(cache.get_addresses_by_authority_id(&authority1).unwrap().len(), 2); + + cache.insert(authority2.clone(), vec![multiaddr2.clone(), multiaddr3.clone()]); + + assert_eq!( + Some(&authority2), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr2).unwrap()) + ); + assert_eq!( + Some(&authority2), + cache.get_authority_id_by_peer_id(&peer_id_from_multiaddr(&multiaddr3).unwrap()) + ); + assert!(cache.get_addresses_by_authority_id(&authority1).unwrap().is_empty()); + + TestResult::passed() + } + + QuickCheck::new() + .max_tests(10) + .quickcheck(property as fn(_, _, _, _, _) -> TestResult) + } } diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 04f597aa26b03..b702cd8c40085 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -294,7 +294,7 @@ fn 
publish_discover_cycle() { Default::default(), ); - worker.publish_ext_addresses().await.unwrap(); + worker.publish_ext_addresses(false).await.unwrap(); // Expect authority discovery to put a new record onto the dht. assert_eq!(network.put_value_call.lock().unwrap().len(), 1); diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 93ee4fc1445de..c8277d3b5d32c 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -22,7 +22,7 @@ use std::{pin::Pin, time, sync::Arc}; use sc_client_api::backend; -use codec::Decode; +use codec::{Decode, Encode}; use sp_consensus::{evaluation, Proposal, ProofRecording, DisableProofRecording, EnableProofRecording}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; @@ -42,14 +42,14 @@ use std::marker::PhantomData; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_proposer_metrics::MetricsLink as PrometheusMetrics; -/// Default maximum block size in bytes used by [`Proposer`]. +/// Default block size limit in bytes used by [`Proposer`]. /// -/// Can be overwritten by [`ProposerFactory::set_maximum_block_size`]. +/// Can be overwritten by [`ProposerFactory::set_block_size_limit`]. /// /// Be aware that there is also an upper packet size on what the networking code /// will accept. If the block doesn't fit in such a package, it can not be /// transferred to other nodes. -pub const DEFAULT_MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; +pub const DEFAULT_BLOCK_SIZE_LIMIT: usize = 4 * 1024 * 1024 + 512; /// Proposer factory. pub struct ProposerFactory { @@ -60,8 +60,14 @@ pub struct ProposerFactory { transaction_pool: Arc, /// Prometheus Link, metrics: PrometheusMetrics, - max_block_size: usize, + /// The default block size limit. + /// + /// If no `block_size_limit` is passed to [`Proposer::propose`], this block size limit will be + /// used. 
+ default_block_size_limit: usize, telemetry: Option, + /// When estimating the block size, should the proof be included? + include_proof_in_block_size_estimation: bool, /// phantom member to pin the `Backend`/`ProofRecording` type. _phantom: PhantomData<(B, PR)>, } @@ -81,9 +87,10 @@ impl ProposerFactory { spawn_handle: Box::new(spawn_handle), transaction_pool, metrics: PrometheusMetrics::new(prometheus), - max_block_size: DEFAULT_MAX_BLOCK_SIZE, + default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT, telemetry, client, + include_proof_in_block_size_estimation: false, _phantom: PhantomData, } } @@ -93,6 +100,9 @@ impl ProposerFactory { /// Create a new proposer factory with proof recording enabled. /// /// Each proposer created by this instance will record a proof while building a block. + /// + /// This will also include the proof into the estimation of the block size. This can be disabled + /// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`]. pub fn with_proof_recording( spawn_handle: impl SpawnNamed + 'static, client: Arc, @@ -101,24 +111,32 @@ impl ProposerFactory { telemetry: Option, ) -> Self { ProposerFactory { - spawn_handle: Box::new(spawn_handle), client, + spawn_handle: Box::new(spawn_handle), transaction_pool, metrics: PrometheusMetrics::new(prometheus), - max_block_size: DEFAULT_MAX_BLOCK_SIZE, + default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT, telemetry, + include_proof_in_block_size_estimation: true, _phantom: PhantomData, } } + + /// Disable the proof inclusion when estimating the block size. + pub fn disable_proof_in_block_size_estimation(&mut self) { + self.include_proof_in_block_size_estimation = false; + } } impl ProposerFactory { - /// Set the maximum block size in bytes. + /// Set the default block size limit in bytes. /// - /// The default value for the maximum block size is: - /// [`DEFAULT_MAX_BLOCK_SIZE`]. 
- pub fn set_maximum_block_size(&mut self, size: usize) { - self.max_block_size = size; + /// The default value for the block size limit is: + /// [`DEFAULT_BLOCK_SIZE_LIMIT`]. + /// + /// If there is no block size limit passed to [`Proposer::propose`], this value will be used. + pub fn set_default_block_size_limit(&mut self, limit: usize) { + self.default_block_size_limit = limit; } } @@ -152,9 +170,10 @@ impl ProposerFactory transaction_pool: self.transaction_pool.clone(), now, metrics: self.metrics.clone(), - max_block_size: self.max_block_size, + default_block_size_limit: self.default_block_size_limit, telemetry: self.telemetry.clone(), _phantom: PhantomData, + include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, }; proposer @@ -195,7 +214,8 @@ pub struct Proposer { transaction_pool: Arc, now: Box time::Instant + Send + Sync>, metrics: PrometheusMetrics, - max_block_size: usize, + default_block_size_limit: usize, + include_proof_in_block_size_estimation: bool, telemetry: Option, _phantom: PhantomData<(B, PR)>, } @@ -225,6 +245,7 @@ impl sp_consensus::Proposer for inherent_data: InherentData, inherent_digests: DigestFor, max_duration: time::Duration, + block_size_limit: Option, ) -> Self::Proposal { let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); @@ -236,6 +257,7 @@ impl sp_consensus::Proposer for inherent_data, inherent_digests, deadline, + block_size_limit, ).await; if tx.send(res).is_err() { trace!("Could not send block production result to proposer!"); @@ -264,6 +286,7 @@ impl Proposer inherent_data: InherentData, inherent_digests: DigestFor, deadline: time::Instant, + block_size_limit: Option, ) -> Result, PR::Proof>, sp_blockchain::Error> { /// If the block is full we will attempt to push at most /// this number of transactions before quitting for real. 
@@ -297,7 +320,9 @@ impl Proposer let mut unqueue_invalid = Vec::new(); let mut t1 = self.transaction_pool.ready_at(self.parent_number).fuse(); - let mut t2 = futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); + let mut t2 = futures_timer::Delay::new( + deadline.saturating_duration_since((self.now)()) / 8, + ).fuse(); let pending_iterator = select! { res = t1 => res, @@ -311,8 +336,13 @@ impl Proposer }, }; + let block_size_limit = block_size_limit.unwrap_or(self.default_block_size_limit); + debug!("Attempting to push transactions from the pool."); debug!("Pool status: {:?}", self.transaction_pool.status()); + let mut transaction_pushed = false; + let mut hit_block_size_limit = false; + for pending_tx in pending_iterator { if (self.now)() > deadline { debug!( @@ -324,9 +354,30 @@ impl Proposer let pending_tx_data = pending_tx.data().clone(); let pending_tx_hash = pending_tx.hash().clone(); + + let block_size = block_builder.estimate_block_size( + self.include_proof_in_block_size_estimation, + ); + if block_size + pending_tx_data.encoded_size() > block_size_limit { + if skipped < MAX_SKIPPED_TRANSACTIONS { + skipped += 1; + debug!( + "Transaction would overflow the block size limit, \ + but will try {} more transactions before quitting.", + MAX_SKIPPED_TRANSACTIONS - skipped, + ); + continue; + } else { + debug!("Reached block size limit, proceeding with proposing."); + hit_block_size_limit = true; + break; + } + } + trace!("[{:?}] Pushing to the block.", pending_tx_hash); match sc_block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) { Ok(()) => { + transaction_pushed = true; debug!("[{:?}] Pushed to the block.", pending_tx_hash); } Err(ApplyExtrinsicFailed(Validity(e))) @@ -356,6 +407,13 @@ impl Proposer } } + if hit_block_size_limit && !transaction_pushed { + warn!( + "Hit block size limit of `{}` without including any transaction!", + block_size_limit, + ); + } + 
self.transaction_pool.remove_invalid(&unqueue_invalid); let (block, storage_changes, proof) = block_builder.build()?.into_inner(); @@ -367,7 +425,8 @@ impl Proposer } ); - info!("🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", + info!( + "🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", block.header().number(), ::Hash::from(block.header().hash()), block.header().parent_hash(), @@ -394,7 +453,6 @@ impl Proposer &block, &self.parent_hash, self.parent_number, - self.max_block_size, ) { error!("Failed to evaluate authored block: {:?}", err); } @@ -420,6 +478,8 @@ mod tests { use sp_blockchain::HeaderBackend; use sp_runtime::traits::NumberFor; use sc_client_api::Backend; + use futures::executor::block_on; + use sp_consensus::Environment; const SOURCE: TransactionSource = TransactionSource::External; @@ -454,11 +514,11 @@ mod tests { client.clone(), ); - futures::executor::block_on( + block_on( txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)]) ).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(0u64)) .expect("header get error") @@ -492,8 +552,8 @@ mod tests { // when let deadline = time::Duration::from_secs(3); - let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline) + let block = block_on( + proposer.propose(Default::default(), Default::default(), deadline, None) ).map(|r| r.block).unwrap(); // then @@ -538,8 +598,8 @@ mod tests { ); let deadline = time::Duration::from_secs(1); - futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline) + block_on( + proposer.propose(Default::default(), Default::default(), deadline, None) ).map(|r| r.block).unwrap(); } @@ -559,11 +619,11 @@ mod tests { let genesis_hash = client.info().best_hash; let block_id = BlockId::Hash(genesis_hash); - 
futures::executor::block_on( + block_on( txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)]), ).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(0u64)) .expect("header get error") @@ -585,8 +645,8 @@ mod tests { ); let deadline = time::Duration::from_secs(9); - let proposal = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline), + let proposal = block_on( + proposer.propose(Default::default(), Default::default(), deadline, None), ).unwrap(); assert_eq!(proposal.block.extrinsics().len(), 1); @@ -625,7 +685,7 @@ mod tests { client.clone(), ); - futures::executor::block_on( + block_on( txpool.submit_at(&BlockId::number(0), SOURCE, vec![ extrinsic(0), extrinsic(1), @@ -667,8 +727,8 @@ mod tests { // when let deadline = time::Duration::from_secs(9); - let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline) + let block = block_on( + proposer.propose(Default::default(), Default::default(), deadline, None) ).map(|r| r.block).unwrap(); // then @@ -679,7 +739,7 @@ mod tests { block }; - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(0u64)) .expect("header get error") @@ -689,9 +749,9 @@ mod tests { // let's create one block and import it let block = propose_block(&client, 0, 2, 7); - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); - futures::executor::block_on( + block_on( txpool.maintain(chain_event( client.header(&BlockId::Number(1)) .expect("header get error") @@ -701,6 +761,84 @@ mod tests { // now let's make sure that we can still make some progress let block = propose_block(&client, 1, 2, 5); - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); + } + + #[test] + fn should_cease_building_block_when_block_limit_is_reached() { 
+ let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner.clone(), + client.clone(), + ); + let genesis_header = client.header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"); + + let extrinsics_num = 4; + let extrinsics = (0..extrinsics_num) + .map(|v| Extrinsic::IncludeData(vec![v as u8; 10])) + .collect::>(); + + let block_limit = genesis_header.encoded_size() + + extrinsics.iter().take(extrinsics_num - 1).map(Encode::encoded_size).sum::() + + Vec::::new().encoded_size(); + + block_on( + txpool.submit_at(&BlockId::number(0), SOURCE, extrinsics) + ).unwrap(); + + block_on(txpool.maintain(chain_event(genesis_header.clone()))); + + let mut proposer_factory = ProposerFactory::new( + spawner.clone(), + client.clone(), + txpool.clone(), + None, + None, + ); + + let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); + + // Give it enough time + let deadline = time::Duration::from_secs(300); + let block = block_on( + proposer.propose(Default::default(), Default::default(), deadline, Some(block_limit)) + ).map(|r| r.block).unwrap(); + + // Based on the block limit, one transaction shouldn't be included. 
+ assert_eq!(block.extrinsics().len(), extrinsics_num - 1); + + let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); + + let block = block_on( + proposer.propose(Default::default(), Default::default(), deadline, None, + )).map(|r| r.block).unwrap(); + + // Without a block limit we should include all of them + assert_eq!(block.extrinsics().len(), extrinsics_num); + + let mut proposer_factory = ProposerFactory::with_proof_recording( + spawner.clone(), + client.clone(), + txpool.clone(), + None, + None, + ); + + let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); + + // Give it enough time + let block = block_on( + proposer.propose(Default::default(), Default::default(), deadline, Some(block_limit)) + ).map(|r| r.block).unwrap(); + + // The block limit didn't changed, but we now include the proof in the estimation of the + // block size and thus, one less transaction should fit into the limit. + assert_eq!(block.extrinsics().len(), extrinsics_num - 2); } } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index acaf85db76336..133b833cdddc8 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -62,6 +62,7 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), +//! None, //! ); //! //! // We wait until the proposition is performed. @@ -72,4 +73,4 @@ mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_MAX_BLOCK_SIZE}; +pub use crate::basic_authorship::{ProposerFactory, Proposer, DEFAULT_BLOCK_SIZE_LIMIT}; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 4893072a71377..7d391f8fb85b3 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -135,6 +135,8 @@ pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> { block_id: BlockId, parent_hash: Block::Hash, backend: &'a B, + /// The estimated size of the block header. 
+ estimated_header_size: usize, } impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> @@ -165,6 +167,8 @@ where inherent_digests, ); + let estimated_header_size = header.encoded_size(); + let mut api = api.runtime_api(); if record_proof.yes() { @@ -183,6 +187,7 @@ where api, block_id, backend, + estimated_header_size, }) } @@ -270,6 +275,20 @@ where )) }).map_err(|e| Error::Application(Box::new(e))) } + + /// Estimate the size of the block in the current state. + /// + /// If `include_proof` is `true`, the estimated size of the storage proof will be added + /// to the estimation. + pub fn estimate_block_size(&self, include_proof: bool) -> usize { + let size = self.estimated_header_size + self.extrinsics.encoded_size(); + + if include_proof { + size + self.api.proof_recorder().map(|pr| pr.estimate_encoded_size()).unwrap_or(0) + } else { + size + } + } } #[cfg(test)] diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 55748ffb3d903..00a56e5fa9b86 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,7 +18,7 @@ regex = "1.4.2" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } futures = "0.3.9" fdlimit = "0.2.1" -libp2p = "0.36.0" +libp2p = "0.37.1" parity-scale-codec = "2.0.0" hex = "0.4.2" rand = "0.7.3" diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 289d6dc7cc39f..a21a79afe9fdb 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -159,6 +159,7 @@ pub trait CliConfiguration: Sized { &self, chain_spec: &Box, is_dev: bool, + is_validator: bool, net_config_dir: PathBuf, client_id: &str, node_name: &str, @@ -169,6 +170,7 @@ pub trait CliConfiguration: Sized { network_params.network_config( chain_spec, is_dev, + is_validator, Some(net_config_dir), client_id, node_name, @@ -501,6 +503,7 @@ pub trait CliConfiguration: Sized { network: self.network_config( &chain_spec, is_dev, + is_validator, net_config_dir, client_id.as_str(), self.node_name()?.as_str(), diff 
--git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index f4a6e8d3982ba..d4dcd6ebaa79f 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -22,7 +22,7 @@ use sc_network::{ multiaddr::Protocol, }; use sc_service::{ChainSpec, ChainType, config::{Multiaddr, MultiaddrWithPeerId}}; -use std::path::PathBuf; +use std::{borrow::Cow, path::PathBuf}; use structopt::StructOpt; /// Parameters used to create the network configuration. @@ -53,6 +53,10 @@ pub struct NetworkParams { pub public_addr: Vec, /// Listen on this multiaddress. + /// + /// By default: + /// If `--validator` is passed: `/ip4/0.0.0.0/tcp/` and `/ip6/[::]/tcp/`. + /// Otherwise: `/ip4/0.0.0.0/tcp//ws` and `/ip6/[::]/tcp//ws`. #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] pub listen_addr: Vec, @@ -60,12 +64,19 @@ pub struct NetworkParams { #[structopt(long = "port", value_name = "PORT", conflicts_with_all = &[ "listen-addr" ])] pub port: Option, - /// Forbid connecting to private IPv4 addresses (as specified in + /// Always forbid connecting to private IPv4 addresses (as specified in /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with - /// `--reserved-nodes` or `--bootnodes`. - #[structopt(long = "no-private-ipv4")] + /// `--reserved-nodes` or `--bootnodes`. Enabled by default for chains marked as "live" in + /// their chain specifications. + #[structopt(long = "no-private-ipv4", conflicts_with_all = &["allow-private-ipv4"])] pub no_private_ipv4: bool, + /// Always accept connecting to private IPv4 addresses (as specified in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Enabled by default for chains marked as + /// "local" in their chain specifications, or when `--dev` is passed. 
+ #[structopt(long = "allow-private-ipv4", conflicts_with_all = &["no-private-ipv4"])] + pub allow_private_ipv4: bool, + /// Specify the number of outgoing connections we're trying to maintain. #[structopt(long = "out-peers", value_name = "COUNT", default_value = "25")] pub out_peers: u32, @@ -122,6 +133,7 @@ impl NetworkParams { &self, chain_spec: &Box, is_dev: bool, + is_validator: bool, net_config_path: Option, client_id: &str, node_name: &str, @@ -131,14 +143,27 @@ impl NetworkParams { let port = self.port.unwrap_or(default_listen_port); let listen_addresses = if self.listen_addr.is_empty() { - vec![ - Multiaddr::empty() - .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) - .with(Protocol::Tcp(port)), - Multiaddr::empty() - .with(Protocol::Ip4([0, 0, 0, 0].into())) - .with(Protocol::Tcp(port)), - ] + if is_validator { + vec![ + Multiaddr::empty() + .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)), + Multiaddr::empty() + .with(Protocol::Ip4([0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)), + ] + } else { + vec![ + Multiaddr::empty() + .with(Protocol::Ip6([0, 0, 0, 0, 0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)) + .with(Protocol::Ws(Cow::Borrowed("/"))), + Multiaddr::empty() + .with(Protocol::Ip4([0, 0, 0, 0].into())) + .with(Protocol::Tcp(port)) + .with(Protocol::Ws(Cow::Borrowed("/"))), + ] + } } else { self.listen_addr.clone() }; @@ -155,6 +180,13 @@ impl NetworkParams { || is_dev || matches!(chain_type, ChainType::Local | ChainType::Development); + let allow_private_ipv4 = match (self.allow_private_ipv4, self.no_private_ipv4) { + (true, true) => unreachable!("`*_private_ipv4` flags are mutually exclusive; qed"), + (true, false) => true, + (false, true) => false, + (false, false) => is_dev || matches!(chain_type, ChainType::Local | ChainType::Development), + }; + NetworkConfiguration { boot_nodes, net_config_path, @@ -177,7 +209,7 @@ impl NetworkParams { client_version: client_id.to_string(), transport: 
TransportConfig::Normal { enable_mdns: !is_dev && !self.no_mdns, - allow_private_ipv4: !self.no_private_ipv4, + allow_private_ipv4, wasm_external_transport: None, }, max_parallel_downloads: self.max_parallel_downloads, diff --git a/client/cli/src/params/offchain_worker_params.rs b/client/cli/src/params/offchain_worker_params.rs index ef39a1ed41be2..b41a5d5625266 100644 --- a/client/cli/src/params/offchain_worker_params.rs +++ b/client/cli/src/params/offchain_worker_params.rs @@ -23,14 +23,13 @@ //! targeted at handling input parameter parsing providing //! a reasonable abstraction. -use structopt::StructOpt; -use sc_service::config::OffchainWorkerConfig; use sc_network::config::Role; +use sc_service::config::OffchainWorkerConfig; +use structopt::StructOpt; use crate::error; use crate::OffchainWorkerEnabled; - /// Offchain worker related parameters. #[derive(Debug, StructOpt)] pub struct OffchainWorkerParams { @@ -59,11 +58,7 @@ pub struct OffchainWorkerParams { impl OffchainWorkerParams { /// Load spec to `Configuration` from `OffchainWorkerParams` and spec factory. - pub fn offchain_worker( - &self, - role: &Role, - ) -> error::Result - { + pub fn offchain_worker(&self, role: &Role) -> error::Result { let enabled = match (&self.enabled, role) { (OffchainWorkerEnabled::WhenValidating, Role::Authority { .. 
}) => true, (OffchainWorkerEnabled::Always, _) => true, @@ -71,8 +66,10 @@ impl OffchainWorkerParams { (OffchainWorkerEnabled::WhenValidating, _) => false, }; - let indexing_enabled = enabled && self.indexing_enabled; - - Ok(OffchainWorkerConfig { enabled, indexing_enabled }) + let indexing_enabled = self.indexing_enabled; + Ok(OffchainWorkerConfig { + enabled, + indexing_enabled, + }) } } diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 1465119c81d08..b2301fa9c5de5 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -26,7 +26,6 @@ futures = "0.3.9" futures-timer = "3.0.1" sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } log = "0.4.8" -parking_lot = "0.11.1" sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-io = { version = "3.0.0", path = "../../../primitives/io" } @@ -38,6 +37,7 @@ sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } sc-telemetry = { version = "3.0.0", path = "../../telemetry" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +async-trait = "0.1.42" # We enable it only for web-wasm check # See https://docs.rs/getrandom/0.2.1/getrandom/#webassembly-support getrandom = { version = "0.2", features = ["js"], optional = true } @@ -52,3 +52,4 @@ sc-network-test = { version = "0.8.0", path = "../../network/test" } sc-service = { version = "0.9.0", default-features = false, path = "../../service" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } tempfile = "3.1.0" +parking_lot = "0.11.1" diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index a0d08202da2f6..0ec95d9412c22 100644 --- 
a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -72,10 +72,7 @@ fn check_header( C: sc_client_api::backend::AuxStore, P::Public: Encode + Decode + PartialEq + Clone, { - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(Error::HeaderUnsealed(hash)), - }; + let seal = header.digest_mut().pop().ok_or_else(|| Error::HeaderUnsealed(hash))?; let sig = seal.as_aura_seal().ok_or_else(|| { aura_err(Error::HeaderBadSeal(hash)) @@ -89,10 +86,8 @@ fn check_header( } else { // check the signature is valid under the expected authority and // chain state. - let expected_author = match slot_author::

(slot, &authorities) { - None => return Err(Error::SlotAuthorNotFound), - Some(author) => author, - }; + let expected_author = slot_author::

(slot, &authorities) + .ok_or_else(|| Error::SlotAuthorNotFound)?; let pre_hash = header.hash(); @@ -220,6 +215,7 @@ impl AuraVerifier where } } +#[async_trait::async_trait] impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + @@ -234,7 +230,7 @@ impl Verifier for AuraVerifier where P::Signature: Encode + Decode, CAW: CanAuthorWith + Send + Sync + 'static, { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, @@ -405,6 +401,7 @@ impl, P> AuraBlockImport } } +#[async_trait::async_trait] impl BlockImport for AuraBlockImport where I: BlockImport> + Send + Sync, I::Error: Into, @@ -412,18 +409,19 @@ impl BlockImport for AuraBlockImport: Send + 'static, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, new_cache: HashMap>, @@ -453,7 +451,7 @@ impl BlockImport for AuraBlockImport( { AuraWorker { client, - block_import: Arc::new(Mutex::new(block_import)), + block_import, env: proposer_factory, keystore, sync_oracle, @@ -286,7 +285,7 @@ pub fn build_aura_worker( struct AuraWorker { client: Arc, - block_import: Arc>, + block_import: I, env: E, keystore: SyncCryptoStorePtr, sync_oracle: SO, @@ -326,8 +325,8 @@ where "aura" } - fn block_import(&self) -> Arc> { - self.block_import.clone() + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import } fn epoch_data( @@ -581,6 +580,7 @@ mod tests { use super::*; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, AlwaysCanAuthor, DisableProofRecording, + import_queue::BoxJustificationImport, }; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; @@ -629,6 +629,7 @@ mod tests { _: InherentData, digests: 
DigestFor, _: Duration, + _: Option, ) -> Self::Proposal { let r = self.1.new_block(digests).unwrap().build().map_err(|e| e.into()); @@ -642,13 +643,17 @@ mod tests { const SLOT_DURATION: u64 = 1000; + type AuraVerifier = import_queue::AuraVerifier; + type AuraPeer = Peer<(), PeersClient>; + pub struct AuraTestNet { - peers: Vec>, + peers: Vec, } impl TestNetFactory for AuraTestNet { - type Verifier = import_queue::AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); + type BlockImport = PeersClient; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { @@ -682,14 +687,22 @@ mod tests { } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn make_block_import(&self, client: PeersClient) -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) { + (client.as_block_import(), None, ()) + } + + fn peer(&mut self, i: usize) -> &mut AuraPeer { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -805,7 +818,7 @@ mod tests { let worker = AuraWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(client)), + block_import: client, env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), @@ -854,7 +867,7 @@ mod tests { let mut worker = AuraWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(client.clone())), + block_import: client.clone(), env: environ, keystore: keystore.into(), sync_oracle: DummyOracle.clone(), @@ -875,6 +888,7 @@ mod tests { ends_at: Instant::now() + Duration::from_secs(100), inherent_data: InherentData::new(), duration: Duration::from_millis(1000), + block_size_limit: None, }, )).unwrap(); diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 14d48fba1bb57..b04caeb3ee9d7 100644 --- a/client/consensus/babe/Cargo.toml +++ 
b/client/consensus/babe/Cargo.toml @@ -53,6 +53,7 @@ merlin = "2.0" pdqselect = "0.1.0" derive_more = "0.99.2" retain_mut = "0.1.2" +async-trait = "0.1.42" [dev-dependencies] sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } @@ -65,6 +66,3 @@ substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils sc-block-builder = { version = "0.9.0", path = "../../block-builder" } rand_chacha = "0.2.2" tempfile = "3.1.0" - -[features] -test-helpers = [] diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index ca14a764eece5..6696a65040a5e 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -217,7 +217,7 @@ fn epoch_data( SC: SelectChain, { let parent = select_chain.best_chain()?; - epoch_changes.lock().epoch_data_for_child_of( + epoch_changes.shared_data().epoch_data_for_child_of( descendent_query(&**client), &parent.hash(), parent.number().clone(), diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 7d5df77c92176..8b8804e3bfb02 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -18,8 +18,6 @@ //! Schema for BABE epoch changes in the aux-db. -use std::sync::Arc; -use parking_lot::Mutex; use log::info; use codec::{Decode, Encode}; @@ -79,18 +77,19 @@ pub fn load_epoch_changes( }, }; - let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { - info!(target: "babe", - "👶 Creating empty BABE epoch changes on what appears to be first startup." + let epoch_changes = SharedEpochChanges::::new(maybe_epoch_changes.unwrap_or_else(|| { + info!( + target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup.", ); EpochChangesFor::::default() - }))); + })); // rebalance the tree after deserialization. this isn't strictly necessary // since the tree is now rebalanced on every update operation. 
but since the // tree wasn't rebalanced initially it's useful to temporarily leave it here // to avoid having to wait until an import for rebalancing. - epoch_changes.lock().rebalance(); + epoch_changes.shared_data().rebalance(); Ok(epoch_changes) } @@ -189,7 +188,7 @@ mod test { ).unwrap(); assert!( - epoch_changes.lock() + epoch_changes.shared_data() .tree() .iter() .map(|(_, _, epoch)| epoch.clone()) @@ -201,7 +200,7 @@ mod test { ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. write_epoch_changes::( - &epoch_changes.lock(), + &epoch_changes.shared_data(), |values| { client.insert_aux(values, &[]).unwrap(); }, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index db13d0f3e420a..c3f1929c2ea8b 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -76,8 +76,8 @@ pub use sp_consensus_babe::{ pub use sp_consensus::SyncOracle; pub use sc_consensus_slots::SlotProportion; use std::{ - collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, - any::Any, borrow::Cow, convert::TryInto, + collections::HashMap, sync::Arc, u64, pin::Pin, borrow::Cow, convert::TryInto, + time::{Duration, Instant}, }; use sp_consensus::{ImportResult, CanAuthorWith, import_queue::BoxJustificationImport}; use sp_core::crypto::Public; @@ -438,7 +438,7 @@ pub fn start_babe(BabeParams { + Sync + 'static, Error: std::error::Error + Send + From + From + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, - CAW: CanAuthorWith + Send + 'static, + CAW: CanAuthorWith + Send + Sync + 'static, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { const HANDLE_BUFFER_SIZE: usize = 1024; @@ -448,7 +448,7 @@ pub fn start_babe(BabeParams { let worker = BabeSlotWorker { client: client.clone(), - block_import: Arc::new(Mutex::new(block_import)), + block_import, env, sync_oracle: sync_oracle.clone(), force_authoring, @@ -502,7 +502,7 @@ async fn answer_requests( match request { 
BabeRequest::EpochForChild(parent_hash, parent_number, slot_number, response) => { let lookup = || { - let epoch_changes = epoch_changes.lock(); + let epoch_changes = epoch_changes.shared_data(); let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( descendent_query(&*client), &parent_hash, @@ -605,7 +605,7 @@ type SlotNotificationSinks = Arc< struct BabeSlotWorker { client: Arc, - block_import: Arc>, + block_import: I, env: E, sync_oracle: SO, force_authoring: bool, @@ -647,8 +647,8 @@ where "babe" } - fn block_import(&self) -> Arc> { - self.block_import.clone() + fn block_import(&mut self) -> &mut Self::BlockImport { + &mut self.block_import } fn epoch_data( @@ -656,7 +656,7 @@ where parent: &B::Header, slot: Slot, ) -> Result { - self.epoch_changes.lock().epoch_descriptor_for_child_of( + self.epoch_changes.shared_data().epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), @@ -667,7 +667,8 @@ where } fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - self.epoch_changes.lock() + self.epoch_changes + .shared_data() .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) .map(|epoch| epoch.as_ref().authorities.len()) } @@ -681,7 +682,7 @@ where debug!(target: "babe", "Attempting to claim slot {}", slot); let s = authorship::claim_slot( slot, - self.epoch_changes.lock().viable_epoch( + self.epoch_changes.shared_data().viable_epoch( &epoch_descriptor, |slot| Epoch::genesis(&self.config, slot) )?.as_ref(), @@ -768,7 +769,7 @@ where import_block.storage_changes = Some(storage_changes); import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); Ok(import_block) @@ -1083,6 +1084,7 @@ where } } +#[async_trait::async_trait] impl Verifier for BabeVerifier where @@ -1093,7 +1095,7 @@ where SelectChain: sp_consensus::SelectChain, CAW: 
CanAuthorWith + Send + Sync, { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: Block::Header, @@ -1125,7 +1127,7 @@ where .map_err(Error::::FetchParentHeader)?; let pre_digest = find_pre_digest::(&header)?; - let epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent_hash, @@ -1189,7 +1191,8 @@ where self.telemetry; CONSENSUS_TRACE; "babe.checked_and_importing"; - "pre_header" => ?pre_header); + "pre_header" => ?pre_header, + ); let mut import_block = BlockImportParams::new(origin, pre_header); import_block.post_digests.push(verified_info.seal); @@ -1197,7 +1200,7 @@ where import_block.justifications = justifications; import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); import_block.post_hash = Some(hash); @@ -1275,6 +1278,7 @@ impl BabeBlockImport { } } +#[async_trait::async_trait] impl BlockImport for BabeBlockImport where Block: BlockT, Inner: BlockImport> + Send + Sync, @@ -1286,7 +1290,7 @@ impl BlockImport for BabeBlockImport; - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -1328,202 +1332,209 @@ impl BlockImport for BabeBlockImport::ParentBlockNoAssociatedWeight(hash)).into() ))? 
- }; + }; - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; + let intermediate = block.take_intermediate::>( + INTERMEDIATE_KEY + )?; - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - (epoch_descriptor, first_in_epoch, parent_weight) - }; + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) + }; - let total_weight = parent_weight + pre_digest.added_weight(); - - // search for this all the time so we can reject unexpected announcements. - let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - let next_config_digest = find_next_config_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { - (true, true, _) => {}, - (false, false, false) => {}, - (false, false, true) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedConfigChange).into(), + let total_weight = parent_weight + pre_digest.added_weight(); + + // search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let next_config_digest = find_next_config_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some(), next_config_digest.is_some()) { + (true, true, _) => {}, + (false, false, false) => {}, + (false, false, true) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::UnexpectedConfigChange).into(), + ) ) - ) - }, - (true, false, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), + }, + (true, false, _) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot)).into(), + ) ) - ) - }, - (false, true, _) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::UnexpectedEpochChange).into(), + }, + (false, true, _) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::UnexpectedEpochChange).into(), + ) ) - ) - }, - } + }, + } - // if there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; + let info = self.client.info(); - let info = self.client.info(); + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some((*epoch_changes).clone()); - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some(epoch_changes.clone()); + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| Epoch::genesis(&self.config, slot) + ).ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| Epoch::genesis(&self.config, slot) - ).ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; + let epoch_config = 
next_config_digest.map(Into::into).unwrap_or_else( + || viable_epoch.as_ref().config.clone() + ); - let epoch_config = next_config_digest.map(Into::into).unwrap_or_else( - || viable_epoch.as_ref().config.clone() - ); + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info - }; + log!(target: "babe", + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot, + viable_epoch.as_ref().start_slot, + ); - log!(target: "babe", - log_level, - "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot, - viable_epoch.as_ref().start_slot, - ); + let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); - let next_epoch = viable_epoch.increment((next_epoch_descriptor, epoch_config)); + log!(target: "babe", + log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); - log!(target: "babe", - log_level, - "👶 Next epoch starts at slot {}", - next_epoch.as_ref().start_slot, - ); + // prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized( + self.client.clone(), + &mut epoch_changes, + )?; - // prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. - let prune_and_import = || { - prune_finalized( - self.client.clone(), - &mut epoch_changes, - )?; + epoch_changes.import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; - epoch_changes.import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + Ok(()) + }; - Ok(()) - }; + if let Err(e) = prune_and_import() { + debug!(target: "babe", "Failed to launch next epoch: {:?}", e); + *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e); + } - if let Err(e) = prune_and_import() { - debug!(target: "babe", "Failed to launch next epoch: {:?}", e); - *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e); + crate::aux_schema::write_epoch_changes::( + &*epoch_changes, + |insert| block.auxiliary.extend( + insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ) + ); } - crate::aux_schema::write_epoch_changes::( - &*epoch_changes, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) + aux_schema::write_block_weight( + hash, + total_weight, + |values| block.auxiliary.extend( + values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ), ); - } - - aux_schema::write_block_weight( - hash, - total_weight, - |values| block.auxiliary.extend( - 
values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ), - ); - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? .ok_or_else( || ConsensusError::ChainLookup("No block weight for parent header.".to_string()) )? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) }; - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) + // Release the mutex, but it stays locked + epoch_changes.release_mutex() }; - let import_result = self.inner.import_block(block, new_cache); + let import_result = self.inner.import_block(block, new_cache).await; // revert to the original epoch changes in case there's an error // importing the block if import_result.is_err() { if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes = old_epoch_changes; + *epoch_changes.upgrade() = old_epoch_changes; } } import_result.map_err(Into::into) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } } @@ -1583,7 +1594,7 @@ pub fn block_import( // startup rather than waiting until importing the next epoch change block. prune_finalized( client.clone(), - &mut epoch_changes.lock(), + &mut epoch_changes.shared_data(), )?; let import = BabeBlockImport::new( @@ -1647,41 +1658,3 @@ pub fn import_queue( registry, )) } - -/// BABE test helpers. Utility methods for manually authoring blocks. -#[cfg(feature = "test-helpers")] -pub mod test_helpers { - use super::*; - - /// Try to claim the given slot and return a `BabePreDigest` if - /// successful. 
- pub fn claim_slot( - slot: Slot, - parent: &B::Header, - client: &C, - keystore: SyncCryptoStorePtr, - link: &BabeLink, - ) -> Option where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, - C::Api: BabeApi, - { - let epoch_changes = link.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(client), - &parent.hash(), - parent.number().clone(), - slot, - |slot| Epoch::genesis(&link.config, slot), - ).unwrap().unwrap(); - - authorship::claim_slot( - slot, - &epoch, - &keystore, - ).map(|(digest, _)| digest) - } -} diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 70b4cd7b0b61d..9949da61da579 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -47,6 +47,7 @@ use rand_chacha::{ }; use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::BABE; +use futures::executor::block_on; type Item = DigestItem; @@ -67,6 +68,9 @@ enum Stage { type Mutator = Arc; +type BabeBlockImport = + PanickingBlockImport>>; + #[derive(Clone)] struct DummyFactory { client: Arc, @@ -134,7 +138,7 @@ impl DummyProposer { // figure out if we should add a consensus digest, since the test runtime // doesn't. - let epoch_changes = self.factory.epoch_changes.lock(); + let epoch_changes = self.factory.epoch_changes.shared_data(); let epoch = epoch_changes.epoch_data_for_child_of( descendent_query(&*self.factory.client), &self.parent_hash, @@ -178,6 +182,7 @@ impl Proposer for DummyProposer { _: InherentData, pre_digests: DigestFor, _: Duration, + _: Option, ) -> Self::Proposal { self.propose_with(pre_digests) } @@ -188,30 +193,37 @@ thread_local! 
{ } #[derive(Clone)] -struct PanickingBlockImport(B); - -impl> BlockImport for PanickingBlockImport { +pub struct PanickingBlockImport(B); + +#[async_trait::async_trait] +impl> BlockImport for PanickingBlockImport + where + B::Transaction: Send, + B: Send, +{ type Error = B::Error; type Transaction = B::Transaction; - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, new_cache: HashMap>, ) -> Result { - Ok(self.0.import_block(block, new_cache).expect("importing block failed")) + Ok(self.0.import_block(block, new_cache).await.expect("importing block failed")) } - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - Ok(self.0.check_block(block).expect("checking block failed")) + Ok(self.0.check_block(block).await.expect("checking block failed")) } } +type BabePeer = Peer, BabeBlockImport>; + pub struct BabeTestNet { - peers: Vec>>, + peers: Vec, } type TestHeader = ::Header; @@ -227,11 +239,12 @@ pub struct TestVerifier { mutator: Mutator, } +#[async_trait::async_trait] impl Verifier for TestVerifier { /// Verify the given data and return the BlockImportParams and an optional /// new set of validators to import. If not, err with an Error-Message /// presented to the User in the logs. - fn verify( + async fn verify( &mut self, origin: BlockOrigin, mut header: TestHeader, @@ -240,7 +253,7 @@ impl Verifier for TestVerifier { ) -> Result<(BlockImportParams, Option)>>), String> { // apply post-sealing mutations (i.e. stripping seal, if desired). (self.mutator)(&mut header, Stage::PostSeal); - self.inner.verify(origin, header, justifications, body) + self.inner.verify(dbg!(origin), header, justifications, body).await } } @@ -255,6 +268,7 @@ pub struct PeerData { impl TestNetFactory for BabeTestNet { type Verifier = TestVerifier; type PeerData = Option; + type BlockImport = BabeBlockImport; /// Create new test network with peers and given config. 
fn from_config(_config: &ProtocolConfig) -> Self { @@ -264,9 +278,9 @@ impl TestNetFactory for BabeTestNet { } } - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, Option, ) @@ -287,7 +301,7 @@ impl TestNetFactory for BabeTestNet { Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) ); ( - BlockImportAdapter::new_full(block_import), + BlockImportAdapter::new(block_import), None, Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), ) @@ -326,17 +340,17 @@ impl TestNetFactory for BabeTestNet { } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut BabePeer { trace!(target: "babe", "Retrieving a peer"); &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec { trace!(target: "babe", "Retrieving peers"); &self.peers } - fn mut_peers>)>( + fn mut_peers)>( &mut self, closure: F, ) { @@ -436,7 +450,7 @@ fn run_one_test( telemetry: None, }).expect("Starts babe")); } - futures::executor::block_on(future::select( + block_on(future::select( futures::future::poll_fn(move |cx| { let mut net = net.lock(); net.poll(cx); @@ -567,7 +581,7 @@ fn can_author_block() { } // Propose and import a new BABE block on top of the given parent. 
-fn propose_and_import_block( +fn propose_and_import_block( parent: &TestHeader, slot: Option, proposer_factory: &mut DummyFactory, @@ -595,7 +609,7 @@ fn propose_and_import_block( let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( + let epoch_descriptor = proposer_factory.epoch_changes.shared_data().epoch_descriptor_for_child_of( descendent_query(&*proposer_factory.client), &parent_hash, *parent.number(), @@ -623,10 +637,10 @@ fn propose_and_import_block( import.body = Some(block.extrinsics); import.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - let import_result = block_import.import_block(import, Default::default()).unwrap(); + let import_result = block_on(block_import.import_block(import, Default::default())).unwrap(); match import_result { ImportResult::Imported(_) => {}, @@ -664,7 +678,7 @@ fn importing_block_one_sets_genesis_epoch() { let genesis_epoch = Epoch::genesis(&data.link.config, 999.into()); - let epoch_changes = data.link.epoch_changes.lock(); + let epoch_changes = data.link.epoch_changes.shared_data(); let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( descendent_query(&*client), &block_hash, @@ -739,13 +753,13 @@ fn importing_epoch_change_block_prunes_tree() { // We should be tracking a total of 9 epochs in the fork tree assert_eq!( - epoch_changes.lock().tree().iter().count(), + epoch_changes.shared_data().tree().iter().count(), 9, ); // And only one root assert_eq!( - epoch_changes.lock().tree().roots().count(), + epoch_changes.shared_data().tree().roots().count(), 1, ); @@ -756,16 +770,16 @@ fn importing_epoch_change_block_prunes_tree() { // at this point no hashes from the first fork must exist on the 
tree assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), + !epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), ); // but the epoch changes from the other forks must still exist assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) + epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) ); assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), ); // finalizing block #25 from the canon chain should prune out the second fork @@ -774,12 +788,12 @@ fn importing_epoch_change_block_prunes_tree() { // at this point no hashes from the second fork must exist on the tree assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), + !epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), ); // while epoch changes from the last fork should still exist assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + epoch_changes.shared_data().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), ); } diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 53dfd9ed10cee..469286f5110d7 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -71,10 +71,8 @@ pub(super) fn check_header( let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; trace!(target: "babe", "Checking header"); - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), - }; + let seal = header.digest_mut().pop() + .ok_or_else(|| babe_err(Error::HeaderUnsealed(header.hash())))?; let sig = seal.as_babe_seal().ok_or_else(|| { 
babe_err(Error::HeaderBadSeal(header.hash())) diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 41c42866e7272..5762b9c998b67 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -17,3 +17,4 @@ sc-client-api = { version = "3.0.0", path = "../../api" } sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } +parking_lot = "0.11.1" diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index a53517c5c35ea..9b4d705769196 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -17,6 +17,8 @@ // along with this program. If not, see . //! Collection of common consensus specific implementations + mod longest_chain; +pub mod shared_data; pub use longest_chain::LongestChain; diff --git a/client/consensus/common/src/shared_data.rs b/client/consensus/common/src/shared_data.rs new file mode 100644 index 0000000000000..d90fc6273e056 --- /dev/null +++ b/client/consensus/common/src/shared_data.rs @@ -0,0 +1,271 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Provides a generic wrapper around shared data. See [`SharedData`] for more information. + +use std::sync::Arc; +use parking_lot::{Mutex, MappedMutexGuard, Condvar, MutexGuard}; + +/// Created by [`SharedDataLocked::release_mutex`]. +/// +/// As long as the object isn't dropped, the shared data is locked. It is advised to drop this +/// object when the shared data doesn't need to be locked anymore. To get access to the shared data +/// [`Self::upgrade`] is provided. +#[must_use = "Shared data will be unlocked on drop!"] +pub struct SharedDataLockedUpgradable { + shared_data: SharedData, +} + +impl SharedDataLockedUpgradable { + /// Upgrade to a *real* mutex guard that will give access to the inner data. + /// + /// Every call to this function will reaquire the mutex again. + pub fn upgrade(&mut self) -> MappedMutexGuard { + MutexGuard::map(self.shared_data.inner.lock(), |i| &mut i.shared_data) + } +} + +impl Drop for SharedDataLockedUpgradable { + fn drop(&mut self) { + let mut inner = self.shared_data.inner.lock(); + // It should not be locked anymore + inner.locked = false; + + // Notify all waiting threads. + self.shared_data.cond_var.notify_all(); + } +} + +/// Created by [`SharedData::shared_data_locked`]. +/// +/// As long as this object isn't dropped, the shared data is held in a mutex guard and the shared +/// data is tagged as locked. Access to the shared data is provided through [`Deref`] and +/// [`DerefMut`]. The trick is to use [`Self::release_mutex`] to release the mutex, but still keep +/// the shared data locked. This means every other thread trying to access the shared data in this +/// time will need to wait until this lock is freed. +/// +/// If this object is dropped without calling [`Self::release_mutex`], the lock will be dropped +/// immediately. 
+#[must_use = "Shared data will be unlocked on drop!"] +pub struct SharedDataLocked<'a, T> { + /// The current active mutex guard holding the inner data. + inner: MutexGuard<'a, SharedDataInner>, + /// The [`SharedData`] instance that created this instance. + /// + /// This instance is only taken on drop or when calling [`Self::release_mutex`]. + shared_data: Option>, +} + +impl<'a, T> SharedDataLocked<'a, T> { + /// Release the mutex, but keep the shared data locked. + pub fn release_mutex(mut self) -> SharedDataLockedUpgradable { + SharedDataLockedUpgradable { + shared_data: self.shared_data.take() + .expect("`shared_data` is only taken on drop; qed"), + } + } +} + +impl<'a, T> Drop for SharedDataLocked<'a, T> { + fn drop(&mut self) { + if let Some(shared_data) = self.shared_data.take() { + // If the `shared_data` is still set, it means [`Self::release_mutex`] wasn't + // called and the lock should be released. + self.inner.locked = false; + + // Notify all waiting threads about the released lock. + shared_data.cond_var.notify_all(); + } + } +} + +impl<'a, T> std::ops::Deref for SharedDataLocked<'a, T> { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.inner.shared_data + } +} + +impl<'a, T> std::ops::DerefMut for SharedDataLocked<'a, T> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner.shared_data + } +} + +/// Holds the shared data and if the shared data is currently locked. +/// +/// For more information see [`SharedData`]. +struct SharedDataInner { + /// The actual shared data that is protected here against concurrent access. + shared_data: T, + /// Is `shared_data` currently locked and can not be accessed? + locked: bool, +} + +/// Some shared data that provides support for locking this shared data for some time. +/// +/// When working with consensus engines there is often data that needs to be shared between multiple +/// parts of the system, like block production and block import. 
This struct provides an abstraction +/// for this shared data in a generic way. +/// +/// The pain point when sharing this data is often the usage of mutex guards in an async context as +/// this doesn't work for most of them as these guards don't implement `Send`. This abstraction +/// provides a way to lock the shared data, while not having the mutex locked. So, the data stays +/// locked and we are still able to hold this lock over an `await` call. +/// +/// # Example +/// +/// ``` +///# use sc_consensus::shared_data::SharedData; +/// +/// let shared_data = SharedData::new(String::from("hello world")); +/// +/// let lock = shared_data.shared_data_locked(); +/// +/// let shared_data2 = shared_data.clone(); +/// let join_handle1 = std::thread::spawn(move || { +/// // This will need to wait for the outer lock to be released before it can access the data. +/// shared_data2.shared_data().push_str("1"); +/// }); +/// +/// assert_eq!(*lock, "hello world"); +/// +/// // Let us release the mutex, but we still keep it locked. +/// // Now we could call `await` for example. +/// let mut lock = lock.release_mutex(); +/// +/// let shared_data2 = shared_data.clone(); +/// let join_handle2 = std::thread::spawn(move || { +/// shared_data2.shared_data().push_str("2"); +/// }); +/// +/// // We still have the lock and can upgrade it to access the data. 
+/// assert_eq!(*lock.upgrade(), "hello world"); +/// lock.upgrade().push_str("3"); +/// +/// drop(lock); +/// join_handle1.join().unwrap(); +/// join_handle2.join().unwrap(); +/// +/// let data = shared_data.shared_data(); +/// // As we don't know the order of the threads, we need to check for both combinations +/// assert!(*data == "hello world321" || *data == "hello world312"); +/// ``` +pub struct SharedData { + inner: Arc>>, + cond_var: Arc, +} + +impl Clone for SharedData { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + cond_var: self.cond_var.clone(), + } + } +} + +impl SharedData { + /// Create a new instance of [`SharedData`] to share the given `shared_data`. + pub fn new(shared_data: T) -> Self { + Self { + inner: Arc::new(Mutex::new(SharedDataInner { shared_data, locked: false })), + cond_var: Default::default(), + } + } + + /// Acquire access to the shared data. + /// + /// This will give mutable access to the shared data. After the returned mutex guard is dropped, + /// the shared data is accessible by other threads. So, this function should be used when + /// reading/writing of the shared data in a local context is required. + /// + /// When requiring to lock shared data for some longer time, even with temporarily releasing the + /// lock, [`Self::shared_data_locked`] should be used. + pub fn shared_data(&self) -> MappedMutexGuard { + let mut guard = self.inner.lock(); + + while guard.locked { + self.cond_var.wait(&mut guard); + } + + debug_assert!(!guard.locked); + + MutexGuard::map(guard, |i| &mut i.shared_data) + } + + /// Acquire access to the shared data and lock it. + /// + /// This will give mutable access to the shared data. The returned [`SharedDataLocked`] + /// provides the function [`SharedDataLocked::release_mutex`] to release the mutex, but + /// keeping the data locked. This is useful in async contexts for example where the data needs to + /// be locked, but a mutex guard can not be held. 
+ /// + /// For an example see [`SharedData`]. + pub fn shared_data_locked(&self) -> SharedDataLocked { + let mut guard = self.inner.lock(); + + while guard.locked { + self.cond_var.wait(&mut guard); + } + + debug_assert!(!guard.locked); + guard.locked = true; + + SharedDataLocked { + inner: guard, + shared_data: Some(self.clone()), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn shared_data_locking_works() { + const THREADS: u32 = 100; + let shared_data = SharedData::new(0u32); + + let lock = shared_data.shared_data_locked(); + + for i in 0..THREADS { + let data = shared_data.clone(); + std::thread::spawn(move || { + if i % 2 == 1 { + *data.shared_data() += 1; + } else { + let mut lock = data.shared_data_locked().release_mutex(); + // Give the other threads some time to wake up + std::thread::sleep(std::time::Duration::from_millis(10)); + *lock.upgrade() += 1; + } + }); + } + + let lock = lock.release_mutex(); + std::thread::sleep(std::time::Duration::from_millis(100)); + drop(lock); + + while *shared_data.shared_data() < THREADS { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + } +} diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index bebe6979e694e..8e2fe77100967 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -14,8 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } -parking_lot = "0.11.1" fork-tree = { version = "3.0.0", path = "../../../utils/fork-tree" } sp-runtime = { path = "../../../primitives/runtime" , version = "3.0.0"} sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sc-client-api = { path = "../../api" , version = "3.0.0"} +sc-consensus = { path = "../common" , version = "0.9.0"} diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 5c5ef446993a2..98a3e83530510 100644 
--- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -20,8 +20,7 @@ pub mod migration; -use std::{sync::Arc, ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; -use parking_lot::Mutex; +use std::{ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; use codec::{Encode, Decode}; use fork_tree::ForkTree; use sc_client_api::utils::is_descendent_of; @@ -645,10 +644,12 @@ impl EpochChanges where } /// Type alias to produce the epoch-changes tree from a block type. -pub type EpochChangesFor = EpochChanges<::Hash, NumberFor, Epoch>; +pub type EpochChangesFor = + EpochChanges<::Hash, NumberFor, Epoch>; /// A shared epoch changes tree. -pub type SharedEpochChanges = Arc>>; +pub type SharedEpochChanges = + sc_consensus::shared_data::SharedData>; #[cfg(test)] mod tests { diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 679fd5a3eb388..32cc89034fb1d 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -23,6 +23,7 @@ parking_lot = "0.11.1" codec = { package = "parity-scale-codec", version = "2.0.0" } serde = { version = "1.0", features=["derive"] } assert_matches = "1.3.0" +async-trait = "0.1.42" sc-client-api = { path = "../../api", version = "3.0.0"} sc-consensus-babe = { path = "../../consensus/babe", version = "0.9.0"} diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index a3f8a825e61dd..d627ea2a25c3a 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -21,12 +21,7 @@ use super::ConsensusDataProvider; use crate::Error; use codec::Encode; -use std::{ - any::Any, - borrow::Cow, - sync::{Arc, atomic}, - time::SystemTime, -}; +use std::{borrow::Cow, sync::{Arc, atomic}, time::SystemTime}; use sc_client_api::AuxStore; use sc_consensus_babe::{ Config, Epoch, authorship, CompatibleDigestItem, 
BabeIntermediate, @@ -102,7 +97,7 @@ impl BabeConsensusDataProvider } fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { - let epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), @@ -156,7 +151,7 @@ impl ConsensusDataProvider for BabeConsensusDataProvider authority_index: 0_u32, }); - let mut epoch_changes = self.epoch_changes.lock(); + let mut epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), @@ -200,7 +195,7 @@ impl ConsensusDataProvider for BabeConsensusDataProvider inherents: &InherentData ) -> Result<(), Error> { let slot = inherents.babe_inherent_data()?; - let epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.shared_data(); let mut epoch_descriptor = epoch_changes .epoch_descriptor_for_child_of( descendent_query(&*self.client), @@ -239,7 +234,7 @@ impl ConsensusDataProvider for BabeConsensusDataProvider params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); Ok(()) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 870640c1f2012..a5351c63bc3b4 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -55,8 +55,9 @@ pub const MANUAL_SEAL_ENGINE_ID: ConsensusEngineId = [b'm', b'a', b'n', b'l']; /// The verifier for the manual seal engine; instantly finalizes. 
struct ManualSealVerifier; +#[async_trait::async_trait] impl Verifier for ManualSealVerifier { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 2176973f3a298..a8050efb9a075 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -104,10 +104,7 @@ pub async fn seal_block( // or fetch the best_block. let parent = match parent_hash { Some(hash) => { - match client.header(BlockId::Hash(hash))? { - Some(header) => header, - None => return Err(Error::BlockNotFound(format!("{}", hash))), - } + client.header(BlockId::Hash(hash))?.ok_or_else(|| Error::BlockNotFound(format!("{}", hash)))? } None => select_chain.best_chain()? }; @@ -127,6 +124,7 @@ pub async fn seal_block( id.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), + None, ).map_err(|err| Error::StringError(format!("{:?}", err))).await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { @@ -144,7 +142,7 @@ pub async fn seal_block( digest_provider.append_block_import(&parent, &mut params, &id)?; } - match block_import.import_block(params, HashMap::new())? { + match block_import.import_block(params, HashMap::new()).await? 
{ ImportResult::Imported(aux) => { Ok(CreatedBlock { hash: ::Header::hash(&header), aux }) }, diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 8be43a8fa04bc..86b0b1df54e26 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -30,3 +30,4 @@ parking_lot = "0.11.1" sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } derive_more = "0.99.2" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} +async-trait = "0.1.42" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index d1df2875a1cb6..bcbc2009321b8 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -36,7 +36,7 @@ mod worker; pub use crate::worker::{MiningWorker, MiningMetadata, MiningBuild}; use std::{ - sync::Arc, any::Any, borrow::Cow, collections::HashMap, marker::PhantomData, + sync::Arc, borrow::Cow, collections::HashMap, marker::PhantomData, cmp::Ordering, time::Duration, }; use futures::{prelude::*, future::Either}; @@ -307,6 +307,7 @@ impl PowBlockImport wher } } +#[async_trait::async_trait] impl BlockImport for PowBlockImport where B: BlockT, I: BlockImport> + Send + Sync, @@ -314,21 +315,21 @@ impl BlockImport for PowBlockImport, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, C::Api: BlockBuilderApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, - CAW: CanAuthorWith, + Algorithm: PowAlgorithm + Send, + Algorithm::Difficulty: 'static + Send, + CAW: CanAuthorWith + Send, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - self.inner.check_block(block).map_err(Into::into) + self.inner.check_block(block).await.map_err(Into::into) } - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, 
new_cache: HashMap>, @@ -403,7 +404,7 @@ impl BlockImport for PowBlockImport PowVerifier { } } +#[async_trait::async_trait] impl Verifier for PowVerifier where Algorithm: PowAlgorithm + Send + Sync, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: 'static + Send, { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, @@ -473,7 +475,7 @@ impl Verifier for PowVerifier where import_block.justifications = justifications; import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box + Box::new(intermediate) as Box<_>, ); import_block.post_hash = Some(hash); @@ -513,6 +515,7 @@ pub fn import_queue( B: BlockT, Transaction: Send + Sync + 'static, Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, + Algorithm::Difficulty: Send, { register_pow_inherent_data_provider(&inherent_data_providers)?; @@ -556,7 +559,7 @@ pub fn start_mining_worker( C: ProvideRuntimeApi + BlockchainEvents + 'static, S: SelectChain + 'static, Algorithm: PowAlgorithm + Clone, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: Send + 'static, E: Environment + Send + Sync + 'static, E::Error: std::fmt::Debug, E::Proposer: Proposer>, @@ -666,6 +669,7 @@ pub fn start_mining_worker( inherent_data, inherent_digest, build_time.clone(), + None, ).await { Ok(x) => x, Err(err) => { diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index d64596e48cf1a..18844e51ce418 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::{pin::Pin, time::Duration, collections::HashMap, any::Any, borrow::Cow}; +use std::{pin::Pin, time::Duration, collections::HashMap, borrow::Cow}; use sc_client_api::ImportNotifications; use sp_runtime::{DigestItem, traits::Block as BlockT, generic::BlockId}; use sp_consensus::{Proposal, BlockOrigin, BlockImportParams, import_queue::BoxBlockImport}; @@ -68,7 +68,8 @@ impl MiningWorker where Block: BlockT, C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, + Algorithm::Difficulty: 'static + Send, + sp_api::TransactionFor: Send + 'static, { /// Get the current best hash. `None` if the worker has just started or the client is doing /// major syncing. @@ -94,7 +95,7 @@ impl MiningWorker where /// Submit a mined seal. The seal will be validated again. Returns true if the submission is /// successful. - pub fn submit(&mut self, seal: Seal) -> bool { + pub async fn submit(&mut self, seal: Seal) -> bool { if let Some(build) = self.build.take() { match self.algorithm.verify( &BlockId::Hash(build.metadata.best_hash), @@ -135,10 +136,10 @@ impl MiningWorker where import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box + Box::new(intermediate) as Box<_>, ); - match self.block_import.import_block(import_block, HashMap::default()) { + match self.block_import.import_block(import_block, HashMap::default()).await { Ok(_) => { info!( target: "pow", diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 34162cfae71e2..64beea50fcf63 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -31,9 +31,9 @@ sp-inherents = { version = "3.0.0", path = "../../../primitives/inherents" } sp-timestamp = { version = "3.0.0", path = "../../../primitives/timestamp" } futures = "0.3.9" futures-timer = "3.0.1" -parking_lot = "0.11.1" log = "0.4.11" thiserror = "1.0.21" +async-trait = "0.1.42" [dev-dependencies] substrate-test-runtime-client = { 
version = "2.0.0", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 83dd88a8d49ff..c1638fb566326 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -32,12 +32,11 @@ pub use slots::SlotInfo; use slots::Slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; -use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::Duration}; +use std::{fmt::Debug, ops::Deref, time::Duration}; use codec::{Decode, Encode}; -use futures::{prelude::*, future::{self, Either}}; +use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; use log::{debug, error, info, warn}; -use parking_lot::Mutex; use sp_api::{ProvideRuntimeApi, ApiRef}; use sp_arithmetic::traits::BaseArithmetic; use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData}; @@ -68,21 +67,23 @@ pub struct SlotResult { /// /// The implementation should not make any assumptions of the slot being bound to the time or /// similar. The only valid assumption is that the slot number is always increasing. +#[async_trait::async_trait] pub trait SlotWorker { /// Called when a new slot is triggered. /// /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in /// the slot. Otherwise `None` is returned. - fn on_slot( + async fn on_slot( &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>> + Send>>; + ) -> Option>; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at /// its beginning and tries to produce a block if successfully claimed, timing /// out if block production takes too long. +#[async_trait::async_trait] pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. type BlockImport: BlockImport>::Transaction> @@ -96,7 +97,7 @@ pub trait SimpleSlotWorker { + Send + Unpin + 'static; /// The type of proposer to use to build blocks. 
- type Proposer: Proposer; + type Proposer: Proposer + Send; /// Data associated with a slot claim. type Claim: Send + 'static; @@ -108,7 +109,7 @@ pub trait SimpleSlotWorker { fn logging_target(&self) -> &'static str; /// A handle to a `BlockImport`. - fn block_import(&self) -> Arc>; + fn block_import(&mut self) -> &mut Self::BlockImport; /// Returns the epoch data necessary for authoring. For time-dependent epochs, /// use the provided slot number as a canonical source of time. @@ -191,36 +192,38 @@ pub trait SimpleSlotWorker { ) -> Duration; /// Implements [`SlotWorker::on_slot`]. - fn on_slot( + async fn on_slot( &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>::Proof>>> + Send>> - where - >::Proposal: Unpin + Send + 'static, - { + ) -> Option>::Proof>> { let (timestamp, slot) = (slot_info.timestamp, slot_info.slot); let telemetry = self.telemetry(); + let logging_target = self.logging_target(); let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); let proposing_remaining = if proposing_remaining_duration == Duration::default() { debug!( - target: self.logging_target(), + target: logging_target, "Skipping proposal slot {} since there's no time left to propose", slot, ); - return Box::pin(future::ready(None)); + return None } else { - Box::new(Delay::new(proposing_remaining_duration)) - as Box + Unpin + Send> + Delay::new(proposing_remaining_duration) }; let epoch_data = match self.epoch_data(&chain_head, slot) { Ok(epoch_data) => epoch_data, Err(err) => { - warn!("Unable to fetch epoch data at block {:?}: {:?}", chain_head.hash(), err); + warn!( + target: logging_target, + "Unable to fetch epoch data at block {:?}: {:?}", + chain_head.hash(), + err, + ); telemetry!( telemetry; @@ -230,7 +233,7 @@ pub trait SimpleSlotWorker { "err" => ?err, ); - return Box::pin(future::ready(None)); + return None; } }; @@ -242,7 +245,7 @@ pub trait SimpleSlotWorker { self.sync_oracle().is_offline() && 
authorities_len.map(|a| a > 1).unwrap_or(false) { - debug!(target: self.logging_target(), "Skipping proposal slot. Waiting for the network."); + debug!(target: logging_target, "Skipping proposal slot. Waiting for the network."); telemetry!( telemetry; CONSENSUS_DEBUG; @@ -250,16 +253,13 @@ pub trait SimpleSlotWorker { "authorities_len" => authorities_len, ); - return Box::pin(future::ready(None)); + return None; } - let claim = match self.claim_slot(&chain_head, slot, &epoch_data) { - None => return Box::pin(future::ready(None)), - Some(claim) => claim, - }; + let claim = self.claim_slot(&chain_head, slot, &epoch_data)?; if self.should_backoff(slot, &chain_head) { - return Box::pin(future::ready(None)); + return None; } debug!( @@ -277,10 +277,15 @@ pub trait SimpleSlotWorker { "timestamp" => *timestamp, ); - let awaiting_proposer = { - let telemetry = telemetry.clone(); - self.proposer(&chain_head).map_err(move |err| { - warn!("Unable to author block in slot {:?}: {:?}", slot, err); + let proposer = match self.proposer(&chain_head).await { + Ok(p) => p, + Err(err) => { + warn!( + target: logging_target, + "Unable to author block in slot {:?}: {:?}", + slot, + err, + ); telemetry!( telemetry; @@ -290,8 +295,8 @@ pub trait SimpleSlotWorker { "err" => ?err ); - err - }) + return None + } }; let logs = self.pre_digest_data(slot, &claim); @@ -299,106 +304,128 @@ pub trait SimpleSlotWorker { // deadline our production to 98% of the total time left for proposing. As we deadline // the proposing below to the same total time left, the 2% margin should be enough for // the result to be returned. 
- let proposing = awaiting_proposer.and_then(move |proposer| proposer.propose( + let proposing = proposer.propose( slot_info.inherent_data, sp_runtime::generic::Digest { logs, }, proposing_remaining_duration.mul_f32(0.98), - ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))); - - let proposal_work = { - let telemetry = telemetry.clone(); - futures::future::select(proposing, proposing_remaining).map(move |v| match v { - Either::Left((b, _)) => b.map(|b| (b, claim)), - Either::Right(_) => { - info!( - "⌛️ Discarding proposal for slot {}; block production took too long", - slot, - ); - // If the node was compiled with debug, tell the user to use release optimizations. - #[cfg(build_type="debug")] - info!("👉 Recompile your node in `--release` mode to mitigate this problem."); - telemetry!( - telemetry; - CONSENSUS_INFO; - "slots.discarding_proposal_took_too_long"; - "slot" => *slot, - ); + None, + ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))); - Err(sp_consensus::Error::ClientImport("Timeout in the Slots proposer".into())) - }, - }) + let proposal = match futures::future::select(proposing, proposing_remaining).await { + Either::Left((Ok(p), _)) => p, + Either::Left((Err(err), _)) => { + warn!( + target: logging_target, + "Proposing failed: {:?}", + err, + ); + + return None + }, + Either::Right(_) => { + info!( + target: logging_target, + "⌛️ Discarding proposal for slot {}; block production took too long", + slot, + ); + // If the node was compiled with debug, tell the user to use release optimizations. 
+ #[cfg(build_type="debug")] + info!( + target: logging_target, + "👉 Recompile your node in `--release` mode to mitigate this problem.", + ); + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.discarding_proposal_took_too_long"; + "slot" => *slot, + ); + + return None + }, }; let block_import_params_maker = self.block_import_params(); let block_import = self.block_import(); - let logging_target = self.logging_target(); - - proposal_work.and_then(move |(proposal, claim)| async move { - let (block, storage_proof) = (proposal.block, proposal.proof); - let (header, body) = block.deconstruct(); - let header_num = *header.number(); - let header_hash = header.hash(); - let parent_hash = *header.parent_hash(); - - let block_import_params = block_import_params_maker( - header, - &header_hash, - body.clone(), - proposal.storage_changes, - claim, - epoch_data, - )?; - - info!( - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - header_num, - block_import_params.post_hash(), - header_hash, - ); - telemetry!( - telemetry; - CONSENSUS_INFO; - "slots.pre_sealed_block"; - "header_num" => ?header_num, - "hash_now" => ?block_import_params.post_hash(), - "hash_previously" => ?header_hash, - ); - - let header = block_import_params.post_header(); - if let Err(err) = block_import.lock().import_block(block_import_params, Default::default()) { + let (block, storage_proof) = (proposal.block, proposal.proof); + let (header, body) = block.deconstruct(); + let header_num = *header.number(); + let header_hash = header.hash(); + let parent_hash = *header.parent_hash(); + + let block_import_params = match block_import_params_maker( + header, + &header_hash, + body.clone(), + proposal.storage_changes, + claim, + epoch_data, + ) { + Ok(bi) => bi, + Err(err) => { warn!( target: logging_target, - "Error with block built on {:?}: {:?}", - parent_hash, + "Failed to create block import params: {:?}", err, ); - telemetry!( - telemetry; - CONSENSUS_WARN; - 
"slots.err_with_block_built_on"; - "hash" => ?parent_hash, - "err" => ?err, - ); + return None } + }; + + info!( + target: logging_target, + "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + header_num, + block_import_params.post_hash(), + header_hash, + ); - Ok(SlotResult { block: B::new(header, body), storage_proof }) - }).then(|r| async move { - r.map_err(|e| warn!(target: "slots", "Encountered consensus error: {:?}", e)).ok() - }).boxed() + telemetry!( + telemetry; + CONSENSUS_INFO; + "slots.pre_sealed_block"; + "header_num" => ?header_num, + "hash_now" => ?block_import_params.post_hash(), + "hash_previously" => ?header_hash, + ); + + let header = block_import_params.post_header(); + if let Err(err) = block_import + .import_block(block_import_params, Default::default()) + .await + { + warn!( + target: logging_target, + "Error with block built on {:?}: {:?}", + parent_hash, + err, + ); + + telemetry!( + telemetry; + CONSENSUS_WARN; + "slots.err_with_block_built_on"; + "hash" => ?parent_hash, + "err" => ?err, + ); + } + + Some(SlotResult { block: B::new(header, body), storage_proof }) } } -impl> SlotWorker>::Proof> for T { - fn on_slot( +#[async_trait::async_trait] +impl + Send> SlotWorker>::Proof> for T { + async fn on_slot( &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>::Proof>>> + Send>> { - SimpleSlotWorker::on_slot(self, chain_head, slot_info) + ) -> Option>::Proof>> { + SimpleSlotWorker::on_slot(self, chain_head, slot_info).await } } @@ -436,25 +463,39 @@ where let SlotDuration(slot_duration) = slot_duration; // rather than use a timer interval, we schedule our waits ourselves - Slots::::new( + let mut slots = Slots::::new( slot_duration.slot_duration(), inherent_data_providers, timestamp_extractor, - ).inspect_err(|e| debug!(target: "slots", "Faulty timer: {:?}", e)) - .try_for_each(move |slot_info| { + ); + + async move { + loop { + let slot_info = match slots.next_slot().await { + Ok(slot) => slot, + Err(err) 
=> { + debug!(target: "slots", "Faulty timer: {:?}", err); + return + }, + }; + // only propose when we are not syncing. if sync_oracle.is_major_syncing() { debug!(target: "slots", "Skipping proposal slot due to sync."); - return Either::Right(future::ready(Ok(()))); + continue; } let slot = slot_info.slot; let chain_head = match client.best_chain() { Ok(x) => x, Err(e) => { - warn!(target: "slots", "Unable to author block in slot {}. \ - no best block header: {:?}", slot, e); - return Either::Right(future::ready(Ok(()))); + warn!( + target: "slots", + "Unable to author block in slot {}. No best block header: {:?}", + slot, + e, + ); + continue; } }; @@ -466,19 +507,11 @@ where slot, err, ); - Either::Right(future::ready(Ok(()))) } else { - Either::Left( - worker.on_slot(chain_head, slot_info) - .then(|_| future::ready(Ok(()))) - ) - } - }).then(|res| { - if let Err(err) = res { - warn!(target: "slots", "Slots stream terminated with an error: {:?}", err); + worker.on_slot(chain_head, slot_info).await; } - future::ready(()) - }) + } + } } /// A header which has been checked @@ -500,7 +533,7 @@ pub enum Error where T: Debug { SlotDurationInvalid(SlotDuration), } -/// A slot duration. Create with `get_or_compute`. +/// A slot duration. Create with [`get_or_compute`](Self::get_or_compute). // The internal member should stay private here to maintain invariants of // `get_or_compute`. 
#[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq)] @@ -548,7 +581,7 @@ impl SlotDuration { cb(client.runtime_api(), &BlockId::number(Zero::zero()))?; info!( - "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", + "⏱ Loaded block-time = {:?} from genesis on first-launch", genesis_slot_duration.slot_duration() ); @@ -758,6 +791,7 @@ mod test { timestamp: Default::default(), inherent_data: Default::default(), ends_at: Instant::now(), + block_size_limit: None, } } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 1cf7c30b9ed9e..1d89ba3bf9927 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -22,10 +22,9 @@ use super::{SlotCompatible, Slot}; use sp_consensus::Error; -use futures::{prelude::*, task::Context, task::Poll}; use sp_inherents::{InherentData, InherentDataProviders}; -use std::{pin::Pin, time::{Duration, Instant}}; +use std::time::{Duration, Instant}; use futures_timer::Delay; /// Returns current duration since unix epoch. @@ -59,6 +58,10 @@ pub struct SlotInfo { pub inherent_data: InherentData, /// Slot duration. pub duration: Duration, + /// Some potential block size limit for the block to be authored at this slot. + /// + /// For more information see [`Proposer::propose`](sp_consensus::Proposer::propose). + pub block_size_limit: Option, } impl SlotInfo { @@ -70,12 +73,14 @@ impl SlotInfo { timestamp: sp_timestamp::Timestamp, inherent_data: InherentData, duration: Duration, + block_size_limit: Option, ) -> Self { Self { slot, timestamp, inherent_data, duration, + block_size_limit, ends_at: Instant::now() + time_until_next(timestamp.as_duration(), duration), } } @@ -107,57 +112,47 @@ impl Slots { } } -impl Stream for Slots { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { +impl Slots { + /// Returns a future that fires when the next slot starts. 
+ pub async fn next_slot(&mut self) -> Result { loop { - let slot_duration = self.slot_duration; self.inner_delay = match self.inner_delay.take() { None => { // schedule wait. - let wait_dur = time_until_next(duration_now(), slot_duration); + let wait_dur = time_until_next(duration_now(), self.slot_duration); Some(Delay::new(wait_dur)) } Some(d) => Some(d), }; - if let Some(ref mut inner_delay) = self.inner_delay { - match Future::poll(Pin::new(inner_delay), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(()) => {} - } + if let Some(inner_delay) = self.inner_delay.take() { + inner_delay.await; } - // timeout has fired. let inherent_data = match self.inherent_data_providers.create_inherent_data() { Ok(id) => id, - Err(err) => return Poll::Ready(Some(Err(sp_consensus::Error::InherentData(err)))), + Err(err) => return Err(sp_consensus::Error::InherentData(err)), }; let result = self.timestamp_extractor.extract_timestamp_and_slot(&inherent_data); - let (timestamp, slot, offset) = match result { - Ok(v) => v, - Err(err) => return Poll::Ready(Some(Err(err))), - }; + let (timestamp, slot, offset) = result?; // reschedule delay for next slot. let ends_in = offset + - time_until_next(timestamp.as_duration(), slot_duration); + time_until_next(timestamp.as_duration(), self.slot_duration); self.inner_delay = Some(Delay::new(ends_in)); // never yield the same slot twice. 
if slot > self.last_slot { self.last_slot = slot; - break Poll::Ready(Some(Ok(SlotInfo::new( + break Ok(SlotInfo::new( slot, timestamp, inherent_data, self.slot_duration, - )))) + None, + )) } } } } - -impl Unpin for Slots {} diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index f0c187bd379f1..a2501891b31e3 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -31,7 +31,7 @@ use sp_core::{ use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; use sp_state_machine::{ - DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection + DBValue, backend::Backend as StateBackend, StorageCollection, ChildStorageCollection, ProofRecorder, }; use kvdb::{KeyValueDB, DBTransaction}; use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; @@ -44,14 +44,25 @@ type State = CachingState, B>; struct StorageDb { db: Arc, + proof_recorder: Option>, _block: std::marker::PhantomData, } impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); - self.db.get(0, &key) - .map_err(|e| format!("Database backend error: {:?}", e)) + let prefixed_key = prefixed_key::>(key, prefix); + if let Some(recorder) = &self.proof_recorder { + if let Some(v) = recorder.get(&key) { + return Ok(v.clone()); + } + let backend_value = self.db.get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e))?; + recorder.record(key.clone(), backend_value.clone()); + Ok(backend_value) + } else { + self.db.get(0, &prefixed_key) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } } @@ -105,11 +116,12 @@ pub struct BenchmarkingState { child_key_tracker: RefCell, HashMap, KeyTracker>>>, read_write_tracker: RefCell, whitelist: RefCell>, + proof_recorder: Option>, } impl BenchmarkingState { /// Create a new instance that creates a database in a temporary dir. 
- pub fn new(genesis: Storage, _cache_size_mb: Option) -> Result { + pub fn new(genesis: Storage, _cache_size_mb: Option, record_proof: bool) -> Result { let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); @@ -126,6 +138,7 @@ impl BenchmarkingState { child_key_tracker: Default::default(), read_write_tracker: Default::default(), whitelist: Default::default(), + proof_recorder: record_proof.then(Default::default), }; state.add_whitelist_to_tracker(); @@ -150,10 +163,15 @@ impl BenchmarkingState { *self.state.borrow_mut() = None; let db = match self.db.take() { Some(db) => db, - None => Arc::new(::kvdb_memorydb::create(1)), + None => Arc::new(kvdb_memorydb::create(1)), }; self.db.set(Some(db.clone())); - let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); + self.proof_recorder.as_ref().map(|r| r.reset()); + let storage_db = Arc::new(StorageDb:: { + db, + proof_recorder: self.proof_recorder.clone(), + _block: Default::default() + }); *self.state.borrow_mut() = Some(State::new( DbState::::new(storage_db, self.root.get()), self.shared_cache.clone(), @@ -408,7 +426,8 @@ impl StateBackend> for BenchmarkingState { None } - fn commit(&self, + fn commit( + &self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction, main_storage_changes: StorageCollection, @@ -495,6 +514,10 @@ impl StateBackend> for BenchmarkingState { fn usage_info(&self) -> sp_state_machine::UsageInfo { self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) } + + fn proof_size(&self) -> Option { + self.proof_recorder.as_ref().map(|recorder| recorder.estimate_encoded_size() as u32) + } } impl std::fmt::Debug for BenchmarkingState { @@ -510,7 +533,7 @@ mod test { #[test] fn read_to_main_and_child_tries() { - let bench_state = BenchmarkingState::::new(Default::default(), None) + let bench_state = BenchmarkingState::::new(Default::default(), None, false) 
.unwrap(); for _ in 0..2 { diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 8051adc1832bc..860ca41730518 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -503,10 +503,8 @@ fn read_tries_meta( meta_column: u32, ) -> ClientResult> { match db.get(meta_column, meta_keys::CHANGES_TRIES_META) { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => Ok(h), - Err(err) => Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), - }, + Some(h) => Decode::decode(&mut &h[..]) + .map_err(|err| ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), None => Ok(ChangesTriesMeta { oldest_digest_range: None, oldest_pruned_digest_range_end: Zero::zero(), diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 03a6ce2200957..c7bac13e719d9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -699,7 +699,9 @@ pub struct BlockImportOperation { impl BlockImportOperation { fn apply_offchain(&mut self, transaction: &mut Transaction) { + let mut count = 0; for ((prefix, key), value_operation) in self.offchain_storage_updates.drain(..) 
{ + count += 1; let key = crate::offchain::concatenate_prefix_and_key(&prefix, &key); match value_operation { OffchainOverlayedChange::SetValue(val) => @@ -708,6 +710,10 @@ impl BlockImportOperation { transaction.remove(columns::OFFCHAIN, &key), } } + + if count > 0 { + log::debug!(target: "sc_offchain", "Applied {} offchain indexing changes.", count); + } } fn apply_aux(&mut self, transaction: &mut Transaction) { @@ -2744,6 +2750,100 @@ pub(crate) mod tests { } } + #[test] + fn storage_hash_is_cached_correctly() { + let backend = Backend::::new_test(10, 10); + + let hash0 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![(b"test".to_vec(), b"test".to_vec())]; + + header.state_root = op.old_state.storage_root(storage + .iter() + .map(|(x, y)| (&x[..], Some(&y[..]))) + ).0.into(); + let hash = header.hash(); + + op.reset_storage(Storage { + top: storage.into_iter().collect(), + children_default: Default::default(), + }).unwrap(); + op.set_block_data( + header.clone(), + Some(vec![]), + None, + NewBlockState::Best, + ).unwrap(); + + backend.commit_operation(op).unwrap(); + + hash + }; + + let block0_hash = backend.state_at(BlockId::Hash(hash0)) + .unwrap() + .storage_hash(&b"test"[..]) + .unwrap(); + + let hash1 = { + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash0, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![(b"test".to_vec(), Some(b"test2".to_vec()))]; + + let (root, overlay) = op.old_state.storage_root( + storage.iter() + .map(|(k, v)| (&k[..], 
v.as_ref().map(|v| &v[..]))) + ); + op.update_db_storage(overlay).unwrap(); + header.state_root = root.into(); + let hash = header.hash(); + + op.update_storage(storage, Vec::new()).unwrap(); + op.set_block_data( + header, + Some(vec![]), + None, + NewBlockState::Normal, + ).unwrap(); + + backend.commit_operation(op).unwrap(); + + hash + }; + + { + let header = backend.blockchain().header(BlockId::Hash(hash1)).unwrap().unwrap(); + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, BlockId::Hash(hash0)).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); + backend.commit_operation(op).unwrap(); + } + + let block1_hash = backend.state_at(BlockId::Hash(hash1)) + .unwrap() + .storage_hash(&b"test"[..]) + .unwrap(); + + assert_ne!(block0_hash, block1_hash); + } + #[test] fn test_finalize_non_sequential() { let backend = Backend::::new_test(10, 10); diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 2dde8d5058220..8929972e26e66 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -178,6 +178,7 @@ impl Cache { for a in &m.storage { trace!("Reverting enacted key {:?}", HexDisplay::from(a)); self.lru_storage.remove(a); + self.lru_hashes.remove(a); } for a in &m.child_storage { trace!("Reverting enacted child key {:?}", a); @@ -198,6 +199,7 @@ impl Cache { for a in &m.storage { trace!("Retracted key {:?}", HexDisplay::from(a)); self.lru_storage.remove(a); + self.lru_hashes.remove(a); } for a in &m.child_storage { trace!("Retracted child key {:?}", a); @@ -1185,6 +1187,47 @@ mod tests { assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); } + #[test] + fn reverts_storage_hash() { + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h1a = H256::random(); + let h1b = H256::random(); + + let shared = new_shared_cache::(256*1024, (0,1)); + let mut backend = InMemoryBackend::::default(); + 
backend.insert(std::iter::once((None, vec![(key.clone(), Some(vec![1]))]))); + + let mut s = CachingState::new( + backend.clone(), + shared.clone(), + Some(root_parent), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h1a), + Some(1), + true, + ); + + let mut s = CachingState::new( + backend.clone(), + shared.clone(), + Some(root_parent), + ); + s.cache.sync_cache(&[], &[h1a], vec![], vec![], Some(h1b), Some(1), true); + + let s = CachingState::new( + backend.clone(), + shared.clone(), + Some(h1b), + ); + assert_eq!(s.storage_hash(&key).unwrap().unwrap(), BlakeTwo256::hash(&vec![1])); + } + #[test] fn should_track_used_size_correctly() { let root_parent = H256::random(); diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 590b994d50e87..7f82cb8489121 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -395,10 +395,8 @@ pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< }; let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = match db.get(COLUMN_META, key) { - Some(id) => db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok()), - None => None, - } + if let Some(Some(header)) = db.get(COLUMN_META, key) + .and_then(|id| db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok())) { let hash = header.hash(); debug!( diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index f086be43d24c1..342a508357968 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -30,6 +30,7 @@ sp-api = { version = "3.0.0", path = "../../primitives/api" } sp-wasm-interface = { version = "3.0.0", path = "../../primitives/wasm-interface" } sp-runtime-interface = { version = "3.0.0", path = "../../primitives/runtime-interface" } sp-externalities = { version = "0.9.0", path = "../../primitives/externalities" } +sp-maybe-compressed-blob = { version = "3.0.0", path = 
"../../primitives/maybe-compressed-blob" } sc-executor-common = { version = "0.9.0", path = "common" } sc-executor-wasmi = { version = "0.9.0", path = "wasmi" } sc-executor-wasmtime = { version = "0.9.0", path = "wasmtime", optional = true } diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index 7e13e37d33fbe..95c090686e83b 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" parity-wasm = "0.41.0" +pwasm-utils = "0.14.0" codec = { package = "parity-scale-codec", version = "2.0.0" } wasmi = "0.6.2" sp-core = { version = "3.0.0", path = "../../../primitives/core" } diff --git a/client/executor/common/src/lib.rs b/client/executor/common/src/lib.rs index 050bad27d6c30..25e06314aba39 100644 --- a/client/executor/common/src/lib.rs +++ b/client/executor/common/src/lib.rs @@ -23,5 +23,5 @@ pub mod error; pub mod sandbox; -pub mod util; pub mod wasm_runtime; +pub mod runtime_blob; diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs similarity index 64% rename from client/executor/common/src/util.rs rename to client/executor/common/src/runtime_blob/data_segments_snapshot.rs index 5947be4469cd0..3850ec6753bef 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/runtime_blob/data_segments_snapshot.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 // This program is free software: you can redistribute it and/or modify @@ -16,53 +16,10 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! A set of utilities for resetting a wasm instance to its initial state. 
- use crate::error::{self, Error}; +use super::RuntimeBlob; use std::mem; -use parity_wasm::elements::{deserialize_buffer, DataSegment, Instruction, Module as RawModule}; - -/// A bunch of information collected from a WebAssembly module. -pub struct WasmModuleInfo { - raw_module: RawModule, -} - -impl WasmModuleInfo { - /// Create `WasmModuleInfo` from the given wasm code. - /// - /// Returns `None` if the wasm code cannot be deserialized. - pub fn new(wasm_code: &[u8]) -> Option { - let raw_module: RawModule = deserialize_buffer(wasm_code).ok()?; - Some(Self { raw_module }) - } - - /// Extract the data segments from the given wasm code. - /// - /// Returns `Err` if the given wasm code cannot be deserialized. - fn data_segments(&self) -> Vec { - self.raw_module - .data_section() - .map(|ds| ds.entries()) - .unwrap_or(&[]) - .to_vec() - } - - /// The number of globals defined in locally in this module. - pub fn declared_globals_count(&self) -> u32 { - self.raw_module - .global_section() - .map(|gs| gs.entries().len() as u32) - .unwrap_or(0) - } - - /// The number of imports of globals. - pub fn imported_globals_count(&self) -> u32 { - self.raw_module - .import_section() - .map(|is| is.globals() as u32) - .unwrap_or(0) - } -} +use parity_wasm::elements::Instruction; /// This is a snapshot of data segments specialzied for a particular instantiation. /// @@ -75,7 +32,7 @@ pub struct DataSegmentsSnapshot { impl DataSegmentsSnapshot { /// Create a snapshot from the data segments from the module. - pub fn take(module: &WasmModuleInfo) -> error::Result { + pub fn take(module: &RuntimeBlob) -> error::Result { let data_segments = module .data_segments() .into_iter() @@ -105,9 +62,7 @@ impl DataSegmentsSnapshot { // if/when we gain those. 
return Err(Error::ImportedGlobalsUnsupported); } - insn => { - return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))) - } + insn => return Err(Error::InvalidInitializerExpression(format!("{:?}", insn))), }; Ok((offset, contents)) diff --git a/client/executor/common/src/runtime_blob/globals_snapshot.rs b/client/executor/common/src/runtime_blob/globals_snapshot.rs new file mode 100644 index 0000000000000..a43814e1d4e14 --- /dev/null +++ b/client/executor/common/src/runtime_blob/globals_snapshot.rs @@ -0,0 +1,110 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use super::RuntimeBlob; + +/// Saved value of particular exported global. +struct SavedValue { + /// The handle of this global which can be used to refer to this global. + handle: Global, + /// The global value that was observed during the snapshot creation. + value: sp_wasm_interface::Value, +} + +/// An adapter for a wasm module instance that is focused on getting and setting globals. +pub trait InstanceGlobals { + /// A handle to a global which can be used to get or set a global variable. This is supposed to + /// be a lightweight handle, like an index or an Rc-like smart-pointer, which is cheap to clone. 
+ type Global: Clone; + /// Get a handle to a global by it's export name. + /// + /// The requested export is must exist in the exported list, and it should be a mutable global. + fn get_global(&self, export_name: &str) -> Self::Global; + /// Get the current value of the global. + fn get_global_value(&self, global: &Self::Global) -> sp_wasm_interface::Value; + /// Update the current value of the global. + /// + /// The global behind the handle is guaranteed to be mutable and the value to be the same type + /// as the global. + fn set_global_value(&self, global: &Self::Global, value: sp_wasm_interface::Value); +} + +/// A set of exposed mutable globals. +/// +/// This is set of globals required to create a [`GlobalsSnapshot`] and that are collected from +/// a runtime blob that was instrumented by [`InstrumentModule::expose_mutable_globals`]. +/// +/// If the code wasn't instrumented then it would be empty and snapshot would do nothing. +pub struct ExposedMutableGlobalsSet(Vec); + +impl ExposedMutableGlobalsSet { + /// Collect the set from the given runtime blob. See the struct documentation for details. + pub fn collect(runtime_blob: &RuntimeBlob) -> Self { + let global_names = runtime_blob + .exported_internal_global_names() + .map(ToOwned::to_owned) + .collect(); + Self(global_names) + } +} + +/// A snapshot of a global variables values. This snapshot can be later used for restoring the +/// values to the preserved state. +/// +/// Technically, a snapshot stores only values of mutable global variables. This is because +/// immutable global variables always have the same values. +/// +/// We take it from an instance rather from a module because the start function could potentially +/// change any of the mutable global values. +pub struct GlobalsSnapshot(Vec>); + +impl GlobalsSnapshot { + /// Take a snapshot of global variables for a given instance. 
+ /// + /// # Panics + /// + /// This function panics if the instance doesn't correspond to the module from which the + /// [`ExposedMutableGlobalsSet`] was collected. + pub fn take(mutable_globals: &ExposedMutableGlobalsSet, instance: &Instance) -> Self + where + Instance: InstanceGlobals, + { + let global_names = &mutable_globals.0; + let mut saved_values = Vec::with_capacity(global_names.len()); + + for global_name in global_names { + let handle = instance.get_global(global_name); + let value = instance.get_global_value(&handle); + saved_values.push(SavedValue { handle, value }); + } + + Self(saved_values) + } + + /// Apply the snapshot to the given instance. + /// + /// This instance must be the same that was used for creation of this snapshot. + pub fn apply(&self, instance: &Instance) + where + Instance: InstanceGlobals, + { + for saved_value in &self.0 { + instance.set_global_value(&saved_value.handle, saved_value.value); + } + } +} diff --git a/client/executor/common/src/runtime_blob/mod.rs b/client/executor/common/src/runtime_blob/mod.rs new file mode 100644 index 0000000000000..372df7bd97eb7 --- /dev/null +++ b/client/executor/common/src/runtime_blob/mod.rs @@ -0,0 +1,57 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! 
This module allows for inspection and instrumentation, i.e. modifying the module to alter it's +//! structure or behavior, of a wasm module. +//! +//! ## Instrumentation +//! +//! In ideal world, there would be no instrumentation. However, in the real world the execution +//! engines we use are somewhat limited in their APIs or abilities. +//! +//! To give you some examples: +//! +//! - wasmi allows reaching to non-exported mutable globals so that we could reset them. +//! Wasmtime doesn’t support that. +//! +//! We need to reset the globals because when we +//! execute the Substrate Runtime, we do not drop and create the instance anew, instead +//! we restore some selected parts of the state. +//! +//! - stack depth metering can be performed via instrumentation or deferred to the engine and say +//! be added directly in machine code. Implementing this in machine code is rather cumbersome so +//! instrumentation looks like a good solution. +//! +//! Stack depth metering is needed to make a wasm blob +//! execution deterministic, which in turn is needed by the Parachain Validation Function in Polkadot. +//! +//! ## Inspection +//! +//! Inspection of a wasm module may be needed to extract some useful information, such as to extract +//! data segment snapshot, which is helpful for quickly restoring the initial state of instances. +//! Inspection can be also useful to prove that a wasm module possesses some properties, such as, +//! is free of any floating point operations, which is a useful step towards making instances produced +//! from such a module deterministic. 
+ +mod data_segments_snapshot; +mod globals_snapshot; +mod runtime_blob; + +pub use data_segments_snapshot::DataSegmentsSnapshot; +pub use globals_snapshot::{GlobalsSnapshot, ExposedMutableGlobalsSet, InstanceGlobals}; +pub use runtime_blob::RuntimeBlob; diff --git a/client/executor/common/src/runtime_blob/runtime_blob.rs b/client/executor/common/src/runtime_blob/runtime_blob.rs new file mode 100644 index 0000000000000..d90a48fde0c81 --- /dev/null +++ b/client/executor/common/src/runtime_blob/runtime_blob.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use parity_wasm::elements::{DataSegment, Module as RawModule, deserialize_buffer, serialize}; + +use crate::error::WasmError; + +/// A bunch of information collected from a WebAssembly module. +#[derive(Clone)] +pub struct RuntimeBlob { + raw_module: RawModule, +} + +impl RuntimeBlob { + /// Create `RuntimeBlob` from the given wasm code. + /// + /// Returns `Err` if the wasm code cannot be deserialized. 
+ pub fn new(wasm_code: &[u8]) -> Result { + let raw_module: RawModule = deserialize_buffer(wasm_code) + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {:?}", e)))?; + Ok(Self { raw_module }) + } + + /// Extract the data segments from the given wasm code. + pub(super) fn data_segments(&self) -> Vec { + self.raw_module + .data_section() + .map(|ds| ds.entries()) + .unwrap_or(&[]) + .to_vec() + } + + /// The number of globals defined in locally in this module. + pub fn declared_globals_count(&self) -> u32 { + self.raw_module + .global_section() + .map(|gs| gs.entries().len() as u32) + .unwrap_or(0) + } + + /// The number of imports of globals. + pub fn imported_globals_count(&self) -> u32 { + self.raw_module + .import_section() + .map(|is| is.globals() as u32) + .unwrap_or(0) + } + + /// Perform an instrumentation that makes sure that the mutable globals are exported. + pub fn expose_mutable_globals(&mut self) { + pwasm_utils::export_mutable_globals(&mut self.raw_module, "exported_internal_global"); + } + + /// Returns an iterator of all globals which were exported by [`expose_mutable_globals`]. + pub(super) fn exported_internal_global_names<'module>( + &'module self, + ) -> impl Iterator { + let exports = self + .raw_module + .export_section() + .map(|es| es.entries()) + .unwrap_or(&[]); + exports.iter().filter_map(|export| match export.internal() { + parity_wasm::elements::Internal::Global(_) + if export.field().starts_with("exported_internal_global") => + { + Some(export.field()) + } + _ => None, + }) + } + + /// Consumes this runtime blob and serializes it. 
+ pub fn serialize(self) -> Vec { + serialize(self.raw_module) + .expect("serializing into a vec should succeed; qed") + } +} diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 351a2b5f40f00..53968a645c994 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -283,6 +283,11 @@ pub fn create_wasm_runtime_with_code( allow_missing_func_imports: bool, cache_path: Option<&Path>, ) -> Result, WasmError> { + use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; + + let code = sp_maybe_compressed_blob::decompress(code, CODE_BLOB_BOMB_LIMIT) + .map_err(|e| WasmError::Other(format!("Decompression error: {:?}", e)))?; + match wasm_method { WasmExecutionMethod::Interpreted => { // Wasmi doesn't have any need in a cache directory. @@ -292,7 +297,7 @@ pub fn create_wasm_runtime_with_code( drop(cache_path); sc_executor_wasmi::create_runtime( - code, + &code, heap_pages, host_functions, allow_missing_func_imports, @@ -300,14 +305,22 @@ pub fn create_wasm_runtime_with_code( .map(|runtime| -> Arc { Arc::new(runtime) }) } #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => + WasmExecutionMethod::Compiled => { + let blob = sc_executor_common::runtime_blob::RuntimeBlob::new(&code)?; sc_executor_wasmtime::create_runtime( - code, - heap_pages, + sc_executor_wasmtime::CodeSupplyMode::Verbatim { blob }, + sc_executor_wasmtime::Config { + heap_pages: heap_pages as u32, + allow_missing_func_imports, + cache_path: cache_path.map(ToOwned::to_owned), + semantics: sc_executor_wasmtime::Semantics { + fast_instance_reuse: true, + stack_depth_metering: false, + }, + }, host_functions, - allow_missing_func_imports, - cache_path, - ).map(|runtime| -> Arc { Arc::new(runtime) }), + ).map(|runtime| -> Arc { Arc::new(runtime) }) + }, } } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index e6a6ef3a61039..0163e07e654bf 100644 --- a/client/executor/wasmi/src/lib.rs +++ 
b/client/executor/wasmi/src/lib.rs @@ -36,7 +36,7 @@ use sc_executor_common::{ error::{Error, WasmError}, sandbox, }; -use sc_executor_common::util::{DataSegmentsSnapshot, WasmModuleInfo}; +use sc_executor_common::runtime_blob::{RuntimeBlob, DataSegmentsSnapshot}; struct FunctionExecutor<'a> { sandbox_store: sandbox::Store, @@ -661,11 +661,8 @@ pub fn create_runtime( ) .map_err(|e| WasmError::Instantiation(e.to_string()))?; - let data_segments_snapshot = DataSegmentsSnapshot::take( - &WasmModuleInfo::new(code) - .ok_or_else(|| WasmError::Other("cannot deserialize module".to_string()))?, - ) - .map_err(|e| WasmError::Other(e.to_string()))?; + let data_segments_snapshot = DataSegmentsSnapshot::take(&RuntimeBlob::new(code)?) + .map_err(|e| WasmError::Other(e.to_string()))?; let global_vals_snapshot = GlobalValsSnapshot::take(&instance); (data_segments_snapshot, global_vals_snapshot) diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 051b314e4498a..b9f2dd1a9d92f 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -22,7 +22,7 @@ sp-wasm-interface = { version = "3.0.0", path = "../../../primitives/wasm-interf sp-runtime-interface = { version = "3.0.0", path = "../../../primitives/runtime-interface" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-allocator = { version = "3.0.0", path = "../../../primitives/allocator" } -wasmtime = "0.22" +wasmtime = "0.24.0" pwasm-utils = "0.14.0" [dev-dependencies] diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index 08cedd434e366..21b7728c323c8 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -16,9 +16,9 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::state_holder; +use crate::{state_holder, util}; use sc_executor_common::error::WasmError; -use sp_wasm_interface::{Function, Value, ValueType}; +use sp_wasm_interface::{Function, ValueType}; use std::any::Any; use wasmtime::{ Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, @@ -187,12 +187,12 @@ fn call_static( qed ", ); - // `into_value` panics if it encounters a value that doesn't fit into the values + // `from_wasmtime_val` panics if it encounters a value that doesn't fit into the values // available in substrate. // // This, however, cannot happen since the signature of this function is created from // a `dyn Function` signature of which cannot have a non substrate value by definition. - let mut params = wasmtime_params.iter().cloned().map(into_value); + let mut params = wasmtime_params.iter().cloned().map(util::from_wasmtime_val); std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { static_func.execute(&mut host_ctx, &mut params) @@ -211,7 +211,7 @@ fn call_static( "wasmtime function signature, therefore the number of results, should always \ correspond to the number of results returned by the host function", ); - wasmtime_results[0] = into_wasmtime_val(ret_val); + wasmtime_results[0] = util::into_wasmtime_val(ret_val); Ok(()) } Ok(None) => { @@ -295,28 +295,6 @@ fn into_wasmtime_val_type(val_ty: ValueType) -> wasmtime::ValType { } } -/// Converts a `Val` into a substrate runtime interface `Value`. -/// -/// Panics if the given value doesn't have a corresponding variant in `Value`. 
-pub fn into_value(val: Val) -> Value { - match val { - Val::I32(v) => Value::I32(v), - Val::I64(v) => Value::I64(v), - Val::F32(f_bits) => Value::F32(f_bits), - Val::F64(f_bits) => Value::F64(f_bits), - _ => panic!("Given value type is unsupported by substrate"), - } -} - -pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { - match value { - Value::I32(v) => Val::I32(v), - Value::I64(v) => Val::I64(v), - Value::F32(f_bits) => Val::F32(f_bits), - Value::F64(f_bits) => Val::F64(f_bits), - } -} - /// Attempt to convert a opaque panic payload to a string. fn stringify_panic_payload(payload: Box) -> String { match payload.downcast::<&'static str>() { diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index f0543a7ef9506..fec88a472fb9b 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -25,53 +25,11 @@ use crate::imports::Imports; use std::{slice, marker}; use sc_executor_common::{ error::{Error, Result}, - util::{WasmModuleInfo, DataSegmentsSnapshot}, + runtime_blob, wasm_runtime::InvokeMethod, }; use sp_wasm_interface::{Pointer, WordSize, Value}; -use wasmtime::{Engine, Instance, Module, Memory, Table, Val, Func, Extern, Global, Store}; -use parity_wasm::elements; - -mod globals_snapshot; - -pub use globals_snapshot::GlobalsSnapshot; - -pub struct ModuleWrapper { - module: Module, - data_segments_snapshot: DataSegmentsSnapshot, -} - -impl ModuleWrapper { - pub fn new(engine: &Engine, code: &[u8]) -> Result { - let mut raw_module: elements::Module = elements::deserialize_buffer(code) - .map_err(|e| Error::from(format!("cannot decode module: {}", e)))?; - pwasm_utils::export_mutable_globals(&mut raw_module, "exported_internal_global"); - let instrumented_code = elements::serialize(raw_module) - .map_err(|e| Error::from(format!("cannot encode module: {}", e)))?; - - let module = Module::new(engine, &instrumented_code) - .map_err(|e| 
Error::from(format!("cannot create module: {}", e)))?; - - let module_info = WasmModuleInfo::new(code) - .ok_or_else(|| Error::from("cannot deserialize module".to_string()))?; - - let data_segments_snapshot = DataSegmentsSnapshot::take(&module_info) - .map_err(|e| Error::from(format!("cannot take data segments snapshot: {}", e)))?; - - Ok(Self { - module, - data_segments_snapshot, - }) - } - - pub fn module(&self) -> &Module { - &self.module - } - - pub fn data_segments_snapshot(&self) -> &DataSegmentsSnapshot { - &self.data_segments_snapshot - } -} +use wasmtime::{Instance, Module, Memory, Table, Val, Func, Extern, Global, Store}; /// Invoked entrypoint format. pub enum EntryPointType { @@ -197,8 +155,8 @@ fn extern_func(extern_: &Extern) -> Option<&Func> { impl InstanceWrapper { /// Create a new instance wrapper from the given wasm module. - pub fn new(store: &Store, module_wrapper: &ModuleWrapper, imports: &Imports, heap_pages: u32) -> Result { - let instance = Instance::new(store, &module_wrapper.module, &imports.externs) + pub fn new(store: &Store, module: &Module, imports: &Imports, heap_pages: u32) -> Result { + let instance = Instance::new(store, module, &imports.externs) .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; let memory = match imports.memory_import_index { @@ -462,3 +420,23 @@ impl InstanceWrapper { } } } + +impl runtime_blob::InstanceGlobals for InstanceWrapper { + type Global = wasmtime::Global; + + fn get_global(&self, export_name: &str) -> Self::Global { + self.instance + .get_global(export_name) + .expect("get_global is guaranteed to be called with an export name of a global; qed") + } + + fn get_global_value(&self, global: &Self::Global) -> Value { + util::from_wasmtime_val(global.get()) + } + + fn set_global_value(&self, global: &Self::Global, value: Value) { + global.set(util::into_wasmtime_val(value)).expect( + "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", + ); + } +} 
diff --git a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs deleted file mode 100644 index a6b1ed394150d..0000000000000 --- a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs +++ /dev/null @@ -1,84 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use super::InstanceWrapper; -use sc_executor_common::error::{Result, Error}; -use sp_wasm_interface::Value; -use crate::imports::{into_value, into_wasmtime_val}; - -/// Saved value of particular exported global. -struct SavedValue { - /// Index of the export. - index: usize, - /// Global value. - value: Value, -} - -/// A snapshot of a global variables values. This snapshot can be used later for restoring the -/// values to the preserved state. -/// -/// Technically, a snapshot stores only values of mutable global variables. This is because -/// immutable global variables always have the same values. -pub struct GlobalsSnapshot(Vec); - -impl GlobalsSnapshot { - /// Take a snapshot of global variables for a given instance. 
- pub fn take(instance_wrapper: &InstanceWrapper) -> Result { - let data = instance_wrapper.instance - .exports() - .enumerate() - .filter_map(|(index, export)| { - if export.name().starts_with("exported_internal_global") { - export.into_global().map( - |g| SavedValue { index, value: into_value(g.get()) } - ) - } else { None } - }) - .collect::>(); - - Ok(Self(data)) - } - - /// Apply the snapshot to the given instance. - /// - /// This instance must be the same that was used for creation of this snapshot. - pub fn apply(&self, instance_wrapper: &InstanceWrapper) -> Result<()> { - // This is a pointer over saved items, it moves forward when the loop value below takes over it's current value. - // Since both pointers (`current` and `index` below) are over ordered lists, they eventually hit all - // equal referenced values. - let mut current = 0; - for (index, export) in instance_wrapper.instance.exports().enumerate() { - if current >= self.0.len() { break; } - let current_saved = &self.0[current]; - if index < current_saved.index { continue; } - else if index > current_saved.index { current += 1; continue; } - else { - export.into_global() - .ok_or_else(|| Error::Other( - "Wrong instance in GlobalsSnapshot::apply: what should be global is not global.".to_string() - ))? - .set(into_wasmtime_val(current_saved.value)) - .map_err(|_e| Error::Other( - "Wrong instance in GlobalsSnapshot::apply: global saved type does not matched applied.".to_string() - ))?; - } - } - - Ok(()) - } -} diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index db7776d4c5845..3679c15249653 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -17,12 +17,11 @@ // along with this program. If not, see . ///! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. 
- mod host; -mod runtime; -mod state_holder; mod imports; mod instance_wrapper; +mod runtime; +mod state_holder; mod util; -pub use runtime::create_runtime; +pub use runtime::{create_runtime, prepare_runtime_artifact, CodeSupplyMode, Config, Semantics}; diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 64ad5a1f4e49f..103b37a681e8b 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -20,27 +20,57 @@ use crate::host::HostState; use crate::imports::{Imports, resolve_imports}; -use crate::instance_wrapper::{ModuleWrapper, InstanceWrapper, GlobalsSnapshot, EntryPoint}; +use crate::instance_wrapper::{InstanceWrapper, EntryPoint}; use crate::state_holder; -use std::rc::Rc; +use std::{path::PathBuf, rc::Rc}; use std::sync::Arc; use std::path::Path; use sc_executor_common::{ error::{Result, WasmError}, + runtime_blob::{DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob}, wasm_runtime::{WasmModule, WasmInstance, InvokeMethod}, }; use sp_allocator::FreeingBumpHeapAllocator; use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{Function, Pointer, WordSize, Value}; -use wasmtime::{Config, Engine, Store}; +use wasmtime::{Engine, Store}; + +enum Strategy { + FastInstanceReuse { + instance_wrapper: Rc, + globals_snapshot: GlobalsSnapshot, + data_segments_snapshot: Arc, + heap_base: u32, + }, + RecreateInstance(InstanceCreator), +} + +struct InstanceCreator { + store: Store, + module: Arc, + imports: Arc, + heap_pages: u32, +} + +impl InstanceCreator { + fn instantiate(&self) -> Result { + InstanceWrapper::new(&self.store, &*self.module, &*self.imports, self.heap_pages) + } +} + +/// Data required for creating instances with the fast instance reuse strategy. 
+struct InstanceSnapshotData { + mutable_globals: ExposedMutableGlobalsSet, + data_segments_snapshot: Arc, +} /// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code /// and execute the compiled code. pub struct WasmtimeRuntime { - module_wrapper: Arc, - heap_pages: u32, - allow_missing_func_imports: bool, + module: Arc, + snapshot_data: Option, + config: Config, host_functions: Vec<&'static dyn Function>, engine: Engine, } @@ -51,41 +81,52 @@ impl WasmModule for WasmtimeRuntime { // Scan all imports, find the matching host functions, and create stubs that adapt arguments // and results. + // + // NOTE: Attentive reader may notice that this could've been moved in `WasmModule` creation. + // However, I am not sure if that's a good idea since it would be pushing our luck further + // by assuming that `Store` not only `Send` but also `Sync`. let imports = resolve_imports( &store, - self.module_wrapper.module(), + &self.module, &self.host_functions, - self.heap_pages, - self.allow_missing_func_imports, + self.config.heap_pages, + self.config.allow_missing_func_imports, )?; - let instance_wrapper = - InstanceWrapper::new(&store, &self.module_wrapper, &imports, self.heap_pages)?; - let heap_base = instance_wrapper.extract_heap_base()?; - let globals_snapshot = GlobalsSnapshot::take(&instance_wrapper)?; - - Ok(Box::new(WasmtimeInstance { - store, - instance_wrapper: Rc::new(instance_wrapper), - module_wrapper: Arc::clone(&self.module_wrapper), - imports, - globals_snapshot, - heap_pages: self.heap_pages, - heap_base, - })) + let strategy = if let Some(ref snapshot_data) = self.snapshot_data { + let instance_wrapper = + InstanceWrapper::new(&store, &self.module, &imports, self.config.heap_pages)?; + let heap_base = instance_wrapper.extract_heap_base()?; + + // This function panics if the instance was created from a runtime blob different from which + // the mutable globals were collected. 
Here, it is easy to see that there is only a single + // runtime blob and thus it's the same that was used for both creating the instance and + // collecting the mutable globals. + let globals_snapshot = GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper); + + Strategy::FastInstanceReuse { + instance_wrapper: Rc::new(instance_wrapper), + globals_snapshot, + data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), + heap_base, + } + } else { + Strategy::RecreateInstance(InstanceCreator { + imports: Arc::new(imports), + module: self.module.clone(), + store, + heap_pages: self.config.heap_pages, + }) + }; + + Ok(Box::new(WasmtimeInstance { strategy })) } } /// A `WasmInstance` implementation that reuses compiled module and spawns instances /// to execute the compiled code. pub struct WasmtimeInstance { - store: Store, - module_wrapper: Arc, - instance_wrapper: Rc, - globals_snapshot: GlobalsSnapshot, - imports: Imports, - heap_pages: u32, - heap_base: u32, + strategy: Strategy, } // This is safe because `WasmtimeInstance` does not leak reference to `self.imports` @@ -94,29 +135,43 @@ unsafe impl Send for WasmtimeInstance {} impl WasmInstance for WasmtimeInstance { fn call(&self, method: InvokeMethod, data: &[u8]) -> Result> { - let entrypoint = self.instance_wrapper.resolve_entrypoint(method)?; - let allocator = FreeingBumpHeapAllocator::new(self.heap_base); - - self.module_wrapper - .data_segments_snapshot() - .apply(|offset, contents| { - self.instance_wrapper - .write_memory_from(Pointer::new(offset), contents) - })?; - - self.globals_snapshot.apply(&*self.instance_wrapper)?; - - perform_call( - data, - Rc::clone(&self.instance_wrapper), - entrypoint, - allocator, - ) + match &self.strategy { + Strategy::FastInstanceReuse { + instance_wrapper, + globals_snapshot, + data_segments_snapshot, + heap_base, + } => { + let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + + data_segments_snapshot.apply(|offset, contents| { + 
instance_wrapper.write_memory_from(Pointer::new(offset), contents) + })?; + globals_snapshot.apply(&**instance_wrapper); + let allocator = FreeingBumpHeapAllocator::new(*heap_base); + + perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator) + } + Strategy::RecreateInstance(instance_creator) => { + let instance_wrapper = instance_creator.instantiate()?; + let heap_base = instance_wrapper.extract_heap_base()?; + let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + + let allocator = FreeingBumpHeapAllocator::new(heap_base); + perform_call(data, Rc::new(instance_wrapper), entrypoint, allocator) + } + } } fn get_global_const(&self, name: &str) -> Result> { - let instance = InstanceWrapper::new(&self.store, &self.module_wrapper, &self.imports, self.heap_pages)?; - instance.get_global_val(name) + match &self.strategy { + Strategy::FastInstanceReuse { + instance_wrapper, .. + } => instance_wrapper.get_global_val(name), + Strategy::RecreateInstance(instance_creator) => { + instance_creator.instantiate()?.get_global_val(name) + } + } } } @@ -125,7 +180,7 @@ impl WasmInstance for WasmtimeInstance { /// In case of an error the caching will not be enabled. fn setup_wasmtime_caching( cache_path: &Path, - config: &mut Config, + config: &mut wasmtime::Config, ) -> std::result::Result<(), String> { use std::fs; @@ -158,22 +213,99 @@ directory = \"{cache_dir}\" Ok(()) } +fn common_config() -> wasmtime::Config { + let mut config = wasmtime::Config::new(); + config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); + config +} + +pub struct Semantics { + /// Enabling this will lead to some optimization shenanigans that make calling [`WasmInstance`] + /// extermely fast. + /// + /// Primarily this is achieved by not recreating the instance for each call and performing a + /// bare minimum clean up: reapplying the data segments and restoring the values for global + /// variables. 
The vast majority of the linear memory is not restored, meaning that effects + /// of previous executions on the same [`WasmInstance`] can be observed there. + /// + /// This is not a problem for a standard substrate runtime execution because it's up to the + /// runtime itself to make sure that it doesn't involve any non-determinism. + /// + /// Since this feature depends on instrumentation, it can be set only if [`CodeSupplyMode::Verbatim`] + /// is used. + pub fast_instance_reuse: bool, + + /// The WebAssembly standard defines a call/value stack but it doesn't say anything about its + /// size except that it has to be finite. The implementations are free to choose their own notion + /// of limit: some may count the number of calls or values, others would rely on the host machine + /// stack and trap on reaching a guard page. + /// + /// This obviously is a source of non-determinism during execution. This feature can be used + /// to instrument the code so that it will count the depth of execution in some deterministic + /// way (the machine stack limit should be so high that the deterministic limit always triggers + /// first). + /// + /// See [here][stack_height] for more details of the instrumentation + /// + /// Since this feature depends on instrumentation, it can be set only if [`CodeSupplyMode::Verbatim`] + /// is used. + /// + /// [stack_height]: https://github.com/paritytech/wasm-utils/blob/d9432baf/src/stack_height/mod.rs#L1-L50 + pub stack_depth_metering: bool, + // Other things like nan canonicalization can be added here. +} + +pub struct Config { + /// The number of wasm pages to be mounted after instantiation. + pub heap_pages: u32, + + /// The WebAssembly standard requires all imports of an instantiated module to be resolved, + /// othewise, the instantiation fails. 
If this option is set to `true`, then this behavior is + /// overriden and imports that are requested by the module and not provided by the host functions + /// will be resolved using stubs. These stubs will trap upon a call. + pub allow_missing_func_imports: bool, + + /// A directory in which wasmtime can store its compiled artifacts cache. + pub cache_path: Option, + + /// Tuning of various semantics of the wasmtime executor. + pub semantics: Semantics, +} + +pub enum CodeSupplyMode<'a> { + /// The runtime is instantiated using the given runtime blob. + Verbatim { + // Rationale to take the `RuntimeBlob` here is so that the client will be able to reuse + // the blob e.g. if they did a prevalidation. If they didn't they can pass a `RuntimeBlob` + // instance and it will be used anyway in most cases, because we are going to do at least + // some instrumentations for both anticipated paths: substrate execution and PVF execution. + // + // Should there raise a need in performing no instrumentation and the client doesn't need + // to do any checks, then we can provide a `Cow` like semantics here: if we need the blob and + // the user got `RuntimeBlob` then extract it, or otherwise create it from the given + // bytecode. + blob: RuntimeBlob, + }, + + /// The code is supplied in a form of a compiled artifact. + /// + /// This assumes that the code is already prepared for execution and the same `Config` was used. + Artifact { compiled_artifact: &'a [u8] }, +} + /// Create a new `WasmtimeRuntime` given the code. This function performs translation from Wasm to /// machine code, which can be computationally heavy. /// /// The `cache_path` designates where this executor implementation can put compiled artifacts. 
pub fn create_runtime( - code: &[u8], - heap_pages: u64, + code_supply_mode: CodeSupplyMode<'_>, + config: Config, host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, - cache_path: Option<&Path>, ) -> std::result::Result { // Create the engine, store and finally the module from the given code. - let mut config = Config::new(); - config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); - if let Some(cache_path) = cache_path { - if let Err(reason) = setup_wasmtime_caching(cache_path, &mut config) { + let mut wasmtime_config = common_config(); + if let Some(ref cache_path) = config.cache_path { + if let Err(reason) = setup_wasmtime_caching(cache_path, &mut wasmtime_config) { log::warn!( "failed to setup wasmtime cache. Performance may degrade significantly: {}.", reason, @@ -181,19 +313,76 @@ pub fn create_runtime( } } - let engine = Engine::new(&config); - let module_wrapper = ModuleWrapper::new(&engine, code) - .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + let engine = Engine::new(&wasmtime_config); + + let (module, snapshot_data) = match code_supply_mode { + CodeSupplyMode::Verbatim { mut blob } => { + instrument(&mut blob, &config.semantics); + + if config.semantics.fast_instance_reuse { + let data_segments_snapshot = DataSegmentsSnapshot::take(&blob).map_err(|e| { + WasmError::Other(format!("cannot take data segments snapshot: {}", e)) + })?; + let data_segments_snapshot = Arc::new(data_segments_snapshot); + + let mutable_globals = ExposedMutableGlobalsSet::collect(&blob); + + let module = wasmtime::Module::new(&engine, &blob.serialize()) + .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + + (module, Some(InstanceSnapshotData { + data_segments_snapshot, + mutable_globals, + })) + } else { + let module = wasmtime::Module::new(&engine, &blob.serialize()) + .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + (module, None) + } + } + CodeSupplyMode::Artifact 
{ compiled_artifact } => { + let module = wasmtime::Module::deserialize(&engine, compiled_artifact) + .map_err(|e| WasmError::Other(format!("cannot deserialize module: {}", e)))?; + + (module, None) + } + }; Ok(WasmtimeRuntime { - module_wrapper: Arc::new(module_wrapper), - heap_pages: heap_pages as u32, - allow_missing_func_imports, + module: Arc::new(module), + snapshot_data, + config, host_functions, engine, }) } +fn instrument(blob: &mut RuntimeBlob, semantics: &Semantics) { + if semantics.fast_instance_reuse { + blob.expose_mutable_globals(); + } + + if semantics.stack_depth_metering { + // TODO: implement deterministic stack metering https://github.com/paritytech/substrate/issues/8393 + } +} + +/// Takes a [`RuntimeBlob`] and precompiles it returning the serialized result of compilation. It +/// can then be used for calling [`create_runtime`] avoiding long compilation times. +pub fn prepare_runtime_artifact( + mut blob: RuntimeBlob, + semantics: &Semantics, +) -> std::result::Result, WasmError> { + instrument(&mut blob, semantics); + + let engine = Engine::new(&common_config()); + let module = wasmtime::Module::new(&engine, &blob.serialize()) + .map_err(|e| WasmError::Other(format!("cannot compile module: {}", e)))?; + module + .serialize() + .map_err(|e| WasmError::Other(format!("cannot serialize module: {}", e))) +} + fn perform_call( data: &[u8], instance_wrapper: Rc, diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index 1437c6f8509bf..c294f66b5017f 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -18,6 +18,8 @@ use std::ops::Range; +use sp_wasm_interface::Value; + /// Construct a range from an offset to a data length after the offset. /// Returns None if the end of the range would exceed some maximum offset. 
pub fn checked_range(offset: usize, len: usize, max: usize) -> Option> { @@ -28,3 +30,26 @@ pub fn checked_range(offset: usize, len: usize, max: usize) -> Option Value { + match val { + wasmtime::Val::I32(v) => Value::I32(v), + wasmtime::Val::I64(v) => Value::I64(v), + wasmtime::Val::F32(f_bits) => Value::F32(f_bits), + wasmtime::Val::F64(f_bits) => Value::F64(f_bits), + v => panic!("Given value type is unsupported by Substrate: {:?}", v), + } +} + +/// Converts a sp_wasm_interface's [`Value`] into the corresponding variant in wasmtime's [`wasmtime::Val`]. +pub fn into_wasmtime_val(value: Value) -> wasmtime::Val { + match value { + Value::I32(v) => wasmtime::Val::I32(v), + Value::I64(v) => wasmtime::Val::I64(v), + Value::F32(f_bits) => wasmtime::Val::F32(f_bits), + Value::F64(f_bits) => wasmtime::Val::F64(f_bits), + } +} diff --git a/client/finality-grandpa-warp-sync/src/lib.rs b/client/finality-grandpa-warp-sync/src/lib.rs index 54d06650bc376..dca6c2ad1ba3f 100644 --- a/client/finality-grandpa-warp-sync/src/lib.rs +++ b/client/finality-grandpa-warp-sync/src/lib.rs @@ -31,7 +31,7 @@ use sc_finality_grandpa::SharedAuthoritySet; mod proof; -pub use proof::{AuthoritySetChangeProof, WarpSyncProof}; +pub use proof::{WarpSyncFragment, WarpSyncProof}; /// Generates the appropriate [`RequestResponseConfig`] for a given chain configuration. 
pub fn request_response_config_for_chain + 'static>( @@ -66,7 +66,7 @@ pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestRespo RequestResponseConfig { name: generate_protocol_name(protocol_id).into(), max_request_size: 32, - max_response_size: 16 * 1024 * 1024, + max_response_size: proof::MAX_WARP_SYNC_PROOF_SIZE as u64, request_timeout: Duration::from_secs(10), inbound_queue: None, } @@ -120,14 +120,14 @@ impl> GrandpaWarpSyncRequestHandler, - pending_response: oneshot::Sender + pending_response: oneshot::Sender, ) -> Result<(), HandleRequestError> where NumberFor: sc_finality_grandpa::BlockNumberOps, { let request = Request::::decode(&mut &payload[..])?; let proof = WarpSyncProof::generate( - self.backend.blockchain(), + &*self.backend, request.begin, &self.authority_set.authority_set_changes(), )?; @@ -135,6 +135,7 @@ impl> GrandpaWarpSyncRequestHandler { +pub struct WarpSyncFragment { /// The last block that the given authority set finalized. This block should contain a digest /// signaling an authority set change from which we can fetch the next authority set. pub header: Block::Header, @@ -45,35 +46,36 @@ pub struct AuthoritySetChangeProof { /// An accumulated proof of multiple authority set changes. #[derive(Decode, Encode)] pub struct WarpSyncProof { - proofs: Vec>, + proofs: Vec>, is_finished: bool, } impl WarpSyncProof { /// Generates a warp sync proof starting at the given block. It will generate authority set /// change proofs for all changes that happened from `begin` until the current authority set - /// (capped by MAX_CHANGES_PER_WARP_SYNC_PROOF). + /// (capped by MAX_WARP_SYNC_PROOF_SIZE). pub fn generate( backend: &Backend, begin: Block::Hash, set_changes: &AuthoritySetChanges>, ) -> Result, HandleRequestError> where - Backend: BlockchainBackend, + Backend: ClientBackend, { // TODO: cache best response (i.e. 
the one with lowest begin_number) + let blockchain = backend.blockchain(); - let begin_number = backend + let begin_number = blockchain .block_number_from_id(&BlockId::Hash(begin))? .ok_or_else(|| HandleRequestError::InvalidRequest("Missing start block".to_string()))?; - if begin_number > backend.info().finalized_number { + if begin_number > blockchain.info().finalized_number { return Err(HandleRequestError::InvalidRequest( "Start block is not finalized".to_string(), )); } - let canon_hash = backend.hash(begin_number)?.expect( + let canon_hash = blockchain.hash(begin_number)?.expect( "begin number is lower than finalized number; \ all blocks below finalized number must have been imported; \ qed.", @@ -86,16 +88,11 @@ impl WarpSyncProof { } let mut proofs = Vec::new(); - + let mut proofs_encoded_len = 0; let mut proof_limit_reached = false; for (_, last_block) in set_changes.iter_from(begin_number) { - if proofs.len() >= MAX_CHANGES_PER_WARP_SYNC_PROOF { - proof_limit_reached = true; - break; - } - - let header = backend.header(BlockId::Number(*last_block))?.expect( + let header = blockchain.header(BlockId::Number(*last_block))?.expect( "header number comes from previously applied set changes; must exist in db; qed.", ); @@ -108,7 +105,7 @@ impl WarpSyncProof { break; } - let justification = backend + let justification = blockchain .justifications(BlockId::Number(*last_block))? .and_then(|just| just.into_justification(GRANDPA_ENGINE_ID)) .expect( @@ -119,16 +116,60 @@ impl WarpSyncProof { let justification = GrandpaJustification::::decode(&mut &justification[..])?; - proofs.push(AuthoritySetChangeProof { + let proof = WarpSyncFragment { header: header.clone(), justification, - }); + }; + let proof_size = proof.encoded_size(); + + // Check for the limit. We remove some bytes from the maximum size, because we're only + // counting the size of the `WarpSyncFragment`s. 
The extra margin is here to leave + // room for rest of the data (the size of the `Vec` and the boolean). + if proofs_encoded_len + proof_size >= MAX_WARP_SYNC_PROOF_SIZE - 50 { + proof_limit_reached = true; + break; + } + + proofs_encoded_len += proof_size; + proofs.push(proof); } - Ok(WarpSyncProof { + let is_finished = if proof_limit_reached { + false + } else { + let latest_justification = + sc_finality_grandpa::best_justification(backend)?.filter(|justification| { + // the existing best justification must be for a block higher than the + // last authority set change. if we didn't prove any authority set + // change then we fallback to make sure it's higher or equal to the + // initial warp sync block. + let limit = proofs + .last() + .map(|proof| proof.justification.target().0 + One::one()) + .unwrap_or(begin_number); + + justification.target().0 >= limit + }); + + if let Some(latest_justification) = latest_justification { + let header = blockchain.header(BlockId::Hash(latest_justification.target().1))? + .expect("header hash corresponds to a justification in db; must exist in db as well; qed."); + + proofs.push(WarpSyncFragment { + header, + justification: latest_justification, + }) + } + + true + }; + + let final_outcome = WarpSyncProof { proofs, - is_finished: !proof_limit_reached, - }) + is_finished, + }; + debug_assert!(final_outcome.encoded_size() <= MAX_WARP_SYNC_PROOF_SIZE); + Ok(final_outcome) } /// Verifies the warp sync proof starting at the given set id and with the given authorities. 
@@ -144,20 +185,28 @@ impl WarpSyncProof { let mut current_set_id = set_id; let mut current_authorities = authorities; - for proof in &self.proofs { + for (fragment_num, proof) in self.proofs.iter().enumerate() { proof .justification .verify(current_set_id, ¤t_authorities) .map_err(|err| HandleRequestError::InvalidProof(err.to_string()))?; - let scheduled_change = find_scheduled_change::(&proof.header).ok_or( - HandleRequestError::InvalidProof( - "Header is missing authority set change digest".to_string(), - ), - )?; + if proof.justification.target().1 != proof.header.hash() { + return Err(HandleRequestError::InvalidProof( + "mismatch between header and justification".to_owned() + )); + } - current_authorities = scheduled_change.next_authorities; - current_set_id += 1; + if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { + current_authorities = scheduled_change.next_authorities; + current_set_id += 1; + } else if fragment_num != self.proofs.len() - 1 { + // Only the last fragment of the proof is allowed to be missing the authority + // set change. 
+ return Err(HandleRequestError::InvalidProof( + "Header is missing authority set change digest".to_string(), + )); + } } Ok((current_set_id, current_authorities)) @@ -170,7 +219,6 @@ mod tests { use codec::Encode; use rand::prelude::*; use sc_block_builder::BlockBuilderProvider; - use sc_client_api::Backend; use sc_finality_grandpa::{AuthoritySetChanges, GrandpaJustification}; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; @@ -237,7 +285,7 @@ mod tests { block.header.digest_mut().logs.push(digest); } - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); if let Some(new_authorities) = new_authorities { // generate a justification for this block, finalize it and note the authority set @@ -295,8 +343,7 @@ mod tests { let genesis_hash = client.hash(0).unwrap().unwrap(); let warp_sync_proof = - WarpSyncProof::generate(backend.blockchain(), genesis_hash, &authority_set_changes) - .unwrap(); + WarpSyncProof::generate(&*backend, genesis_hash, &authority_set_changes).unwrap(); // verifying the proof should yield the last set id and authorities let (new_set_id, new_authorities) = warp_sync_proof.verify(0, genesis_authorities).unwrap(); diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7ae5666c7bc84..1f21f454491b3 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -47,6 +47,7 @@ sc-block-builder = { version = "0.9.0", path = "../block-builder" } finality-grandpa = { version = "0.14.0", features = ["derive-codec"] } pin-project = "1.0.4" linked-hash-map = "0.5.2" +async-trait = "0.1.42" [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 1854a33d29f1f..194911e1f104a 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -18,18 +18,20 @@ //! 
Utilities for dealing with authorities, authority sets, and handoffs. +use std::cmp::Ord; +use std::fmt::Debug; +use std::ops::Add; + use fork_tree::ForkTree; -use parking_lot::RwLock; +use parking_lot::MappedMutexGuard; use finality_grandpa::voter_set::VoterSet; use parity_scale_codec::{Encode, Decode}; use log::debug; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; +use sc_consensus::shared_data::{SharedData, SharedDataLocked}; -use std::cmp::Ord; -use std::fmt::Debug; -use std::ops::Add; -use std::sync::Arc; +use crate::SetId; /// Error type returned on operations on the `AuthoritySet`. #[derive(Debug, derive_more::Display)] @@ -70,19 +72,30 @@ impl From for Error { /// A shared authority set. pub struct SharedAuthoritySet { - inner: Arc>>, + inner: SharedData>, } impl Clone for SharedAuthoritySet { fn clone(&self) -> Self { - SharedAuthoritySet { inner: self.inner.clone() } + SharedAuthoritySet { + inner: self.inner.clone(), + } } } impl SharedAuthoritySet { - /// Acquire a reference to the inner read-write lock. - pub(crate) fn inner(&self) -> &RwLock> { - &*self.inner + /// Returns access to the [`AuthoritySet`]. + pub(crate) fn inner(&self) -> MappedMutexGuard> { + self.inner.shared_data() + } + + /// Returns access to the [`AuthoritySet`] and locks it. + /// + /// For more information see [`SharedDataLocked`]. + pub(crate) fn inner_locked( + &self, + ) -> SharedDataLocked> { + self.inner.shared_data_locked() } } @@ -93,17 +106,17 @@ where N: Add + Ord + Clone + Debug, /// Get the earliest limit-block number that's higher or equal to the given /// min number, if any. pub(crate) fn current_limit(&self, min: N) -> Option { - self.inner.read().current_limit(min) + self.inner().current_limit(min) } /// Get the current set ID. This is incremented every time the set changes. 
pub fn set_id(&self) -> u64 { - self.inner.read().set_id + self.inner().set_id } /// Get the current authorities and their weights (for the current set ID). pub fn current_authorities(&self) -> VoterSet { - VoterSet::new(self.inner.read().current_authorities.iter().cloned()).expect( + VoterSet::new(self.inner().current_authorities.iter().cloned()).expect( "current_authorities is non-empty and weights are non-zero; \ constructor and all mutating operations on `AuthoritySet` ensure this; \ qed.", @@ -112,18 +125,20 @@ where N: Add + Ord + Clone + Debug, /// Clone the inner `AuthoritySet`. pub fn clone_inner(&self) -> AuthoritySet { - self.inner.read().clone() + self.inner().clone() } /// Clone the inner `AuthoritySetChanges`. pub fn authority_set_changes(&self) -> AuthoritySetChanges { - self.inner.read().authority_set_changes.clone() + self.inner().authority_set_changes.clone() } } impl From> for SharedAuthoritySet { fn from(set: AuthoritySet) -> Self { - SharedAuthoritySet { inner: Arc::new(RwLock::new(set)) } + SharedAuthoritySet { + inner: SharedData::new(set), + } } } @@ -671,6 +686,20 @@ impl + Clone> PendingChange { #[derive(Debug, Encode, Decode, Clone, PartialEq)] pub struct AuthoritySetChanges(Vec<(u64, N)>); +/// The response when querying for a set id for a specific block. Either we get a set id +/// together with a block number for the last block in the set, or that the requested block is in the +/// latest set, or that we don't know what set id the given block belongs to. +#[derive(Debug, PartialEq)] +pub enum AuthoritySetChangeId { + /// The requested block is in the latest set. + Latest, + /// Tuple containing the set id and the last block number of that set. + Set(SetId, N), + /// We don't know which set id the request block belongs to (this can only happen due to missing + /// data). 
+ Unknown, +} + impl From> for AuthoritySetChanges { fn from(changes: Vec<(u64, N)>) -> AuthoritySetChanges { AuthoritySetChanges(changes) @@ -686,7 +715,15 @@ impl AuthoritySetChanges { self.0.push((set_id, block_number)); } - pub(crate) fn get_set_id(&self, block_number: N) -> Option<(u64, N)> { + pub(crate) fn get_set_id(&self, block_number: N) -> AuthoritySetChangeId { + if self.0 + .last() + .map(|last_auth_change| last_auth_change.1 < block_number) + .unwrap_or(false) + { + return AuthoritySetChangeId::Latest; + } + let idx = self.0 .binary_search_by_key(&block_number, |(_, n)| n.clone()) .unwrap_or_else(|b| b); @@ -698,16 +735,16 @@ impl AuthoritySetChanges { let (prev_set_id, _) = self.0[idx - 1usize]; if set_id != prev_set_id + 1u64 { // Without the preceding set_id we don't have a well-defined start. - return None; + return AuthoritySetChangeId::Unknown; } } else if set_id != 0 { // If this is the first index, yet not the first set id then it's not well-defined // that we are in the right set id. 
- return None; + return AuthoritySetChangeId::Unknown; } - Some((set_id, block_number)) + AuthoritySetChangeId::Set(set_id, block_number) } else { - None + AuthoritySetChangeId::Unknown } } @@ -1647,11 +1684,11 @@ mod tests { authority_set_changes.append(1, 81); authority_set_changes.append(2, 121); - assert_eq!(authority_set_changes.get_set_id(20), Some((0, 41))); - assert_eq!(authority_set_changes.get_set_id(40), Some((0, 41))); - assert_eq!(authority_set_changes.get_set_id(41), Some((0, 41))); - assert_eq!(authority_set_changes.get_set_id(42), Some((1, 81))); - assert_eq!(authority_set_changes.get_set_id(141), None); + assert_eq!(authority_set_changes.get_set_id(20), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(40), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(41), AuthoritySetChangeId::Set(0, 41)); + assert_eq!(authority_set_changes.get_set_id(42), AuthoritySetChangeId::Set(1, 81)); + assert_eq!(authority_set_changes.get_set_id(141), AuthoritySetChangeId::Latest); } #[test] @@ -1661,11 +1698,11 @@ mod tests { authority_set_changes.append(3, 81); authority_set_changes.append(4, 121); - assert_eq!(authority_set_changes.get_set_id(20), None); - assert_eq!(authority_set_changes.get_set_id(40), None); - assert_eq!(authority_set_changes.get_set_id(41), None); - assert_eq!(authority_set_changes.get_set_id(42), Some((3, 81))); - assert_eq!(authority_set_changes.get_set_id(141), None); + assert_eq!(authority_set_changes.get_set_id(20), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(40), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(41), AuthoritySetChangeId::Unknown); + assert_eq!(authority_set_changes.get_set_id(42), AuthoritySetChangeId::Set(3, 81)); + assert_eq!(authority_set_changes.get_set_id(141), AuthoritySetChangeId::Latest); } #[test] diff --git a/client/finality-grandpa/src/aux_schema.rs 
b/client/finality-grandpa/src/aux_schema.rs index 43c45b9f10ae1..296f7c13c5244 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -19,27 +19,30 @@ //! Schema for stuff in the aux-db. use std::fmt::Debug; -use parity_scale_codec::{Encode, Decode}; -use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use fork_tree::ForkTree; + use finality_grandpa::round::State as RoundState; -use sp_runtime::traits::{Block as BlockT, NumberFor}; use log::{info, warn}; -use sp_finality_grandpa::{AuthorityList, SetId, RoundNumber}; +use parity_scale_codec::{Decode, Encode}; + +use fork_tree::ForkTree; +use sc_client_api::backend::AuxStore; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_finality_grandpa::{AuthorityList, RoundNumber, SetId}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; use crate::authorities::{ - AuthoritySet, AuthoritySetChanges, SharedAuthoritySet, PendingChange, DelayKind, + AuthoritySet, AuthoritySetChanges, DelayKind, PendingChange, SharedAuthoritySet, }; use crate::environment::{ CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, }; -use crate::NewAuthoritySet; +use crate::{GrandpaJustification, NewAuthoritySet}; const VERSION_KEY: &[u8] = b"grandpa_schema_version"; const SET_STATE_KEY: &[u8] = b"grandpa_completed_round"; const CONCLUDED_ROUNDS: &[u8] = b"grandpa_concluded_rounds"; const AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; +const BEST_JUSTIFICATION: &[u8] = b"grandpa_best_justification"; const CURRENT_VERSION: u32 = 3; @@ -464,7 +467,7 @@ where pub(crate) fn update_authority_set( set: &AuthoritySet>, new_set: Option<&NewAuthoritySet>>, - write_aux: F + write_aux: F, ) -> R where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, @@ -492,6 +495,33 @@ where } } +/// Update the justification for the latest finalized block on-disk. 
+/// +/// We always keep around the justification for the best finalized block and overwrite it +/// as we finalize new blocks, this makes sure that we don't store useless justifications +/// but can always prove finality of the latest block. +pub(crate) fn update_best_justification( + justification: &GrandpaJustification, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + let encoded_justification = justification.encode(); + write_aux(&[(BEST_JUSTIFICATION, &encoded_justification[..])]) +} + +/// Fetch the justification for the latest block finalized by GRANDPA, if any. +pub fn best_justification( + backend: &B, +) -> ClientResult>> +where + B: AuxStore, + Block: BlockT, +{ + load_decode::<_, GrandpaJustification>(backend, BEST_JUSTIFICATION) +} + /// Write voter set state. pub(crate) fn write_voter_set_state( backend: &B, @@ -517,10 +547,9 @@ pub(crate) fn write_concluded_round( #[cfg(test)] pub(crate) fn load_authorities( - backend: &B + backend: &B, ) -> Option> { - load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY) - .expect("backend error") + load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY).expect("backend error") } #[cfg(test)] @@ -592,7 +621,7 @@ mod test { ).unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, @@ -616,7 +645,7 @@ mod test { votes: vec![], }, set_id, - &*authority_set.inner().read(), + &*authority_set.inner(), ), current_rounds, }, @@ -688,7 +717,7 @@ mod test { ).unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, @@ -712,7 +741,7 @@ mod test { votes: vec![], }, set_id, - &*authority_set.inner().read(), + &*authority_set.inner(), ), current_rounds, }, @@ -781,7 +810,7 @@ mod test { ).unwrap(); assert_eq!( - *authority_set.inner().read(), + *authority_set.inner(), AuthoritySet::new( authorities.clone(), set_id, diff --git 
a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 27ff1e57b670c..d3a5b49b50726 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -508,7 +508,7 @@ where .best_chain() .map_err(|e| Error::Blockchain(e.to_string()))?; - let authority_set = self.authority_set.inner().read(); + let authority_set = self.authority_set.inner(); // block hash and number of the next pending authority set change in the // given best chain. @@ -1228,7 +1228,7 @@ where // NOTE: lock must be held through writing to DB to avoid race. this lock // also implicitly synchronizes the check for last finalized number // below. - let mut authority_set = authority_set.inner().write(); + let mut authority_set = authority_set.inner(); let status = client.info(); @@ -1275,11 +1275,8 @@ where // `N+1`. this assumption is required to make sure we store // justifications for transition blocks which will be requested by // syncing clients. - let justification = match justification_or_commit { - JustificationOrCommit::Justification(justification) => { - notify_justification(justification_sender, || Ok(justification.clone())); - Some(justification.encode()) - }, + let (justification_required, justification) = match justification_or_commit { + JustificationOrCommit::Justification(justification) => (true, justification), JustificationOrCommit::Commit((round_number, commit)) => { let mut justification_required = // justification is always required when block that enacts new authorities @@ -1297,42 +1294,35 @@ where } } - // NOTE: the code below is a bit more verbose because we - // really want to avoid creating a justification if it isn't - // needed (e.g. if there's no subscribers), and also to avoid - // creating it twice. depending on the vote tree for the round, - // creating a justification might require multiple fetches of - // headers from the database. 
- let justification = || GrandpaJustification::from_commit( + let justification = GrandpaJustification::from_commit( &client, round_number, commit, - ); - - if justification_required { - let justification = justification()?; - notify_justification(justification_sender, || Ok(justification.clone())); - - Some(justification.encode()) - } else { - notify_justification(justification_sender, justification); + )?; - None - } + (justification_required, justification) }, }; - debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); + notify_justification(justification_sender, || Ok(justification.clone())); + + let persisted_justification = if justification_required { + Some((GRANDPA_ENGINE_ID, justification.encode())) + } else { + None + }; // ideally some handle to a synchronization oracle would be used // to avoid unconditionally notifying. - let justification = justification.map(|j| (GRANDPA_ENGINE_ID, j.clone())); client - .apply_finality(import_op, BlockId::Hash(hash), justification, true) + .apply_finality(import_op, BlockId::Hash(hash), persisted_justification, true) .map_err(|e| { warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); e })?; + + debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); + telemetry!( telemetry; CONSENSUS_INFO; @@ -1340,6 +1330,11 @@ where "number" => ?number, "hash" => ?hash, ); + crate::aux_schema::update_best_justification( + &justification, + |insert| apply_aux(import_op, insert, &[]), + )?; + let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { // the authority set has changed. let (new_id, set_ref) = authority_set.current(); diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 80ba8cee9101e..6735d91ba8b75 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -36,23 +36,23 @@ //! 
finality proof (that finalizes some block C that is ancestor of the B and descendant //! of the U) could be returned. -use log::trace; +use log::{trace, warn}; use std::sync::Arc; -use finality_grandpa::BlockNumberOps; use parity_scale_codec::{Encode, Decode}; -use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; +use sp_finality_grandpa::GRANDPA_ENGINE_ID; use sp_runtime::{ - EncodedJustification, generic::BlockId, + generic::BlockId, traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, }; use sc_client_api::backend::Backend; -use sp_finality_grandpa::{AuthorityId, GRANDPA_ENGINE_ID}; -use crate::authorities::AuthoritySetChanges; -use crate::justification::GrandpaJustification; -use crate::SharedAuthoritySet; -use crate::VoterSet; +use crate::{ + SharedAuthoritySet, best_justification, + authorities::{AuthoritySetChangeId, AuthoritySetChanges}, + justification::GrandpaJustification, +}; const MAX_UNKNOWN_HEADERS: usize = 100_000; @@ -97,14 +97,13 @@ where impl FinalityProofProvider where Block: BlockT, - NumberFor: BlockNumberOps, B: Backend + Send + Sync + 'static, { /// Prove finality for the given block number by returning a Justification for the last block of /// the authority set. 
pub fn prove_finality( &self, - block: NumberFor + block: NumberFor, ) -> Result>, FinalityProofError> { let authority_set_changes = if let Some(changes) = self .shared_authority_set @@ -116,8 +115,8 @@ where return Ok(None); }; - prove_finality::<_, _, GrandpaJustification>( - &*self.backend.blockchain(), + prove_finality( + &*self.backend, authority_set_changes, block, ) @@ -151,19 +150,19 @@ pub enum FinalityProofError { Client(sp_blockchain::Error), } -fn prove_finality( - blockchain: &B, +fn prove_finality( + backend: &B, authority_set_changes: AuthoritySetChanges>, block: NumberFor, ) -> Result>, FinalityProofError> where Block: BlockT, - B: BlockchainBackend, - J: ProvableJustification, + B: Backend, { - // Early-return if we sure that there are no blocks finalized AFTER begin block - let info = blockchain.info(); - if info.finalized_number <= block { + // Early-return if we are sure that there are no blocks finalized that cover the requested + // block. + let info = backend.blockchain().info(); + if info.finalized_number < block { let err = format!( "Requested finality proof for descendant of #{} while we only have finalized #{}.", block, @@ -173,45 +172,60 @@ where return Err(FinalityProofError::BlockNotYetFinalized); } - // Get set_id the block belongs to, and the last block of the set which should contain a - // Justification we can use to prove the requested block. - let (_, last_block_for_set) = if let Some(id) = authority_set_changes.get_set_id(block) { - id - } else { - trace!( - target: "afg", - "AuthoritySetChanges does not cover the requested block #{}. \ - Maybe the subscription API is more appropriate.", - block, - ); - return Err(FinalityProofError::BlockNotInAuthoritySetChanges); - }; - - // Get the Justification stored at the last block of the set - let last_block_for_set_id = BlockId::Number(last_block_for_set); - let justification = - if let Some(grandpa_justification) = blockchain.justifications(last_block_for_set_id)? 
- .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) - { - grandpa_justification - } else { - trace!( + let (justification, just_block) = match authority_set_changes.get_set_id(block) { + AuthoritySetChangeId::Latest => { + if let Some(justification) = best_justification(backend)? + .map(|j: GrandpaJustification| (j.encode(), j.target().0)) + { + justification + } else { + trace!( + target: "afg", + "No justification found for the latest finalized block. \ + Returning empty proof.", + ); + return Ok(None); + } + } + AuthoritySetChangeId::Set(_, last_block_for_set) => { + let last_block_for_set_id = BlockId::Number(last_block_for_set); + let justification = if let Some(grandpa_justification) = backend + .blockchain() + .justifications(last_block_for_set_id)? + .and_then(|justifications| justifications.into_justification(GRANDPA_ENGINE_ID)) + { + grandpa_justification + } else { + trace!( + target: "afg", + "No justification found when making finality proof for {}. \ + Returning empty proof.", + block, + ); + return Ok(None); + }; + (justification, last_block_for_set) + } + AuthoritySetChangeId::Unknown => { + warn!( target: "afg", - "No justification found when making finality proof for {}. Returning empty proof.", + "AuthoritySetChanges does not cover the requested block #{} due to missing data. 
\ + You need to resync to populate AuthoritySetChanges properly.", block, ); - return Ok(None); - }; + return Err(FinalityProofError::BlockNotInAuthoritySetChanges); + } + }; // Collect all headers from the requested block until the last block of the set let unknown_headers = { let mut headers = Vec::new(); let mut current = block + One::one(); loop { - if current >= last_block_for_set || headers.len() >= MAX_UNKNOWN_HEADERS { + if current > just_block || headers.len() >= MAX_UNKNOWN_HEADERS { break; } - headers.push(blockchain.expect_header(BlockId::Number(current))?); + headers.push(backend.blockchain().expect_header(BlockId::Number(current))?); current += One::one(); } headers @@ -219,7 +233,7 @@ where Ok(Some( FinalityProof { - block: blockchain.expect_block_hash_from_id(&last_block_for_set_id)?, + block: backend.blockchain().expect_block_hash_from_id(&BlockId::Number(just_block))?, justification, unknown_headers, } @@ -227,96 +241,48 @@ where )) } -/// Check GRANDPA proof-of-finality for the given block. -/// -/// Returns the vector of headers that MUST be validated + imported -/// AND if at least one of those headers is invalid, all other MUST be considered invalid. -/// -/// This is currently not used, and exists primarily as an example of how to check finality proofs. -#[cfg(test)] -fn check_finality_proof( - current_set_id: u64, - current_authorities: sp_finality_grandpa::AuthorityList, - remote_proof: Vec, -) -> ClientResult> -where - J: ProvableJustification

, -{ - let proof = FinalityProof::
::decode(&mut &remote_proof[..]) - .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; - - let justification: J = Decode::decode(&mut &proof.justification[..]) - .map_err(|_| ClientError::JustificationDecode)?; - justification.verify(current_set_id, &current_authorities)?; - - Ok(proof) -} - -/// Justification used to prove block finality. -pub trait ProvableJustification: Encode + Decode { - /// Verify justification with respect to authorities set and authorities set id. - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()>; - - /// Decode and verify justification. - fn decode_and_verify( - justification: &EncodedJustification, - set_id: u64, - authorities: &[(AuthorityId, u64)], - ) -> ClientResult { - let justification = - Self::decode(&mut &**justification).map_err(|_| ClientError::JustificationDecode)?; - justification.verify(set_id, authorities)?; - Ok(justification) - } -} - -impl ProvableJustification for GrandpaJustification -where - NumberFor: BlockNumberOps, -{ - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { - let authorities = VoterSet::new(authorities.iter().cloned()).ok_or( - ClientError::Consensus(sp_consensus::Error::InvalidAuthoritiesSet), - )?; - - GrandpaJustification::verify_with_voter_set(self, set_id, &authorities) - } -} - #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::authorities::AuthoritySetChanges; + use crate::{ + authorities::AuthoritySetChanges, BlockNumberOps, ClientError, SetId, + }; + use futures::executor::block_on; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::{apply_aux, LockImportRun}; + use sp_consensus::BlockOrigin; use sp_core::crypto::Public; - use sp_runtime::Justifications; - use sp_finality_grandpa::AuthorityList; - use sc_client_api::NewBlockState; - use sc_client_api::in_mem::Blockchain as InMemoryBlockchain; - use substrate_test_runtime_client::runtime::{Block, Header, H256}; - 
- pub(crate) type FinalityProof = super::FinalityProof
; - - #[derive(Debug, PartialEq, Encode, Decode)] - pub struct TestJustification(pub (u64, AuthorityList), pub Vec); - - impl ProvableJustification
for TestJustification { - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { - if (self.0).0 != set_id || (self.0).1 != authorities { - return Err(ClientError::BadJustification("test".into())); - } + use sp_finality_grandpa::{AuthorityId, GRANDPA_ENGINE_ID as ID}; + use sp_keyring::Ed25519Keyring; + use substrate_test_runtime_client::{ + runtime::{Block, Header, H256}, + Backend as TestBackend, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClient, TestClientBuilder, TestClientBuilderExt, + }; - Ok(()) - } + /// Check GRANDPA proof-of-finality for the given block. + /// + /// Returns the vector of headers that MUST be validated + imported + /// AND if at least one of those headers is invalid, all other MUST be considered invalid. + fn check_finality_proof( + current_set_id: SetId, + current_authorities: sp_finality_grandpa::AuthorityList, + remote_proof: Vec, + ) -> sp_blockchain::Result> + where + NumberFor: BlockNumberOps, + { + let proof = super::FinalityProof::::decode(&mut &remote_proof[..]) + .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; + + let justification: GrandpaJustification = Decode::decode(&mut &proof.justification[..]) + .map_err(|_| ClientError::JustificationDecode)?; + justification.verify(current_set_id, &current_authorities)?; + + Ok(proof) + } - #[derive(Debug, PartialEq, Encode, Decode)] - pub struct TestBlockJustification(TestJustification, u64, H256); - - impl ProvableJustification
for TestBlockJustification { - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { - self.0.verify(set_id, authorities) - } - } + pub(crate) type FinalityProof = super::FinalityProof
; fn header(number: u64) -> Header { let parent_hash = match number { @@ -332,57 +298,64 @@ pub(crate) mod tests { ) } - fn test_blockchain() -> InMemoryBlockchain { - use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; - let blockchain = InMemoryBlockchain::::new(); - let just0 = Some(Justifications::from((ID, vec![0]))); - let just1 = Some(Justifications::from((ID, vec![1]))); - let just2 = None; - let just3 = Some(Justifications::from((ID, vec![3]))); - blockchain.insert(header(0).hash(), header(0), just0, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(1).hash(), header(1), just1, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(2).hash(), header(2), just2, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(3).hash(), header(3), just3, None, NewBlockState::Final).unwrap(); - blockchain + fn test_blockchain( + number_of_blocks: u64, + to_finalize: &[u64], + ) -> (Arc, Arc, Vec) { + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let mut blocks = Vec::new(); + for _ in 0..number_of_blocks { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + blocks.push(block); + } + + for block in to_finalize { + client.finalize_block(BlockId::Number(*block), None).unwrap(); + } + (client, backend, blocks) + } + + fn store_best_justification(client: &TestClient, just: &GrandpaJustification) { + client.lock_import_and_run(|import_op| { + crate::aux_schema::update_best_justification( + just, + |insert| apply_aux(import_op, insert, &[]), + ) + }) + .unwrap(); } #[test] fn finality_proof_fails_if_no_more_last_finalized_blocks() { - use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; - let blockchain = test_blockchain(); - let just1 = Some(Justifications::from((ID, vec![1]))); - let just2 = Some(Justifications::from((ID, vec![2]))); - 
blockchain.insert(header(4).hash(), header(4), just1, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(5).hash(), header(5), just2, None, NewBlockState::Best).unwrap(); + let (_, backend, _) = test_blockchain(6, &[4]); + let authority_set_changes = AuthoritySetChanges::empty(); - let mut authority_set_changes = AuthoritySetChanges::empty(); - authority_set_changes.append(0, 5); - - // The last finalized block is 3, so we cannot provide further justifications. - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, + // The last finalized block is 4, so we cannot provide further justifications. + let proof_of_5 = prove_finality( + &*backend, authority_set_changes, - *header(4).number(), + 5, ); - assert!(matches!(proof_of_4, Err(FinalityProofError::BlockNotYetFinalized))); + assert!(matches!(proof_of_5, Err(FinalityProofError::BlockNotYetFinalized))); } #[test] fn finality_proof_is_none_if_no_justification_known() { - let blockchain = test_blockchain(); - blockchain - .insert(header(4).hash(), header(4), None, None, NewBlockState::Final) - .unwrap(); + let (_, backend, _) = test_blockchain(6, &[4]); let mut authority_set_changes = AuthoritySetChanges::empty(); authority_set_changes.append(0, 4); // Block 4 is finalized without justification // => we can't prove finality of 3 - let proof_of_3 = prove_finality::<_, _, TestJustification>( - &blockchain, + let proof_of_3 = prove_finality( + &*backend, authority_set_changes, - *header(3).number(), + 3, ) .unwrap(); assert_eq!(proof_of_3, None); @@ -391,7 +364,7 @@ pub(crate) mod tests { #[test] fn finality_proof_check_fails_when_proof_decode_fails() { // When we can't decode proof from Vec - check_finality_proof::<_, TestJustification>( + check_finality_proof::( 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], vec![42], @@ -402,92 +375,208 @@ pub(crate) mod tests { #[test] fn finality_proof_check_fails_when_proof_is_empty() { // When decoded proof has zero length - 
check_finality_proof::<_, TestJustification>( + check_finality_proof::( 1, vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - Vec::::new().encode(), + Vec::>::new().encode(), ) .unwrap_err(); } #[test] - fn finality_proof_check_works() { - let auth = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; + fn finality_proof_check_fails_with_incomplete_justification() { + let (client, _, blocks) = test_blockchain(8, &[4, 5, 8]); + + // Create a commit without precommits + let commit = finality_grandpa::Commit { + target_hash: blocks[7].hash(), + target_number: *blocks[7].header().number(), + precommits: Vec::new(), + }; + let grandpa_just = GrandpaJustification::from_commit(&client, 8, commit).unwrap(); + let finality_proof = FinalityProof { block: header(2).hash(), - justification: TestJustification((1, auth.clone()), vec![7]).encode(), + justification: grandpa_just.encode(), unknown_headers: Vec::new(), }; - let proof = check_finality_proof::<_, TestJustification>( + + check_finality_proof::( 1, - auth.clone(), + vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], finality_proof.encode(), - ) - .unwrap(); - assert_eq!(proof, finality_proof); + ).unwrap_err(); + } + + fn create_commit( + block: Block, + round: u64, + set_id: SetId, + auth: &[Ed25519Keyring] + ) -> finality_grandpa::Commit + where + Id: From, + S: From, + { + let mut precommits = Vec::new(); + + for voter in auth { + let precommit = finality_grandpa::Precommit { + target_hash: block.hash(), + target_number: *block.header().number(), + }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_finality_grandpa::localized_payload(round, set_id, &msg); + let signature = voter.sign(&encoded[..]).into(); + + let signed_precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: voter.public().into(), + }; + precommits.push(signed_precommit); + } + + finality_grandpa::Commit { + target_hash: block.hash(), + target_number: *block.header().number(), + 
precommits, + } + } + + #[test] + fn finality_proof_check_works_with_correct_justification() { + let (client, _, blocks) = test_blockchain(8, &[4, 5, 8]); + + let alice = Ed25519Keyring::Alice; + let set_id = 1; + let round = 8; + let commit = create_commit(blocks[7].clone(), round, set_id, &[alice]); + let grandpa_just = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + + let finality_proof = FinalityProof { + block: header(2).hash(), + justification: grandpa_just.encode(), + unknown_headers: Vec::new(), + }; + assert_eq!( + finality_proof, + check_finality_proof::( + set_id, + vec![(alice.public().into(), 1u64)], + finality_proof.encode(), + ) + .unwrap(), + ); } #[test] fn finality_proof_using_authority_set_changes_fails_with_undefined_start() { - use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; - let blockchain = test_blockchain(); - let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let grandpa_just4 = TestJustification((0, auth.clone()), vec![4]).encode(); - let grandpa_just7 = TestJustification((1, auth.clone()), vec![7]).encode(); - let just4 = Some(Justifications::from((ID, grandpa_just4))); - let just7 = Some(Justifications::from((ID, grandpa_just7))); - blockchain.insert(header(4).hash(), header(4), just4, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(7).hash(), header(7), just7, None, NewBlockState::Final).unwrap(); + let (_, backend, _) = test_blockchain(8, &[4, 5, 8]); // We have stored the correct block number for the relevant set, but as we are missing the // block for the preceding set the start is not well-defined. 
let mut authority_set_changes = AuthoritySetChanges::empty(); - authority_set_changes.append(1, 7); + authority_set_changes.append(1, 8); - let proof_of_5 = prove_finality::<_, _, TestJustification>( - &blockchain, + let proof_of_6 = prove_finality( + &*backend, authority_set_changes, - *header(5).number(), + 6, ); - assert!(matches!(proof_of_5, Err(FinalityProofError::BlockNotInAuthoritySetChanges))); + assert!(matches!(proof_of_6, Err(FinalityProofError::BlockNotInAuthoritySetChanges))); } #[test] fn finality_proof_using_authority_set_changes_works() { - use sp_finality_grandpa::GRANDPA_ENGINE_ID as ID; - let blockchain = test_blockchain(); - let auth = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let grandpa_just4 = TestJustification((0, auth.clone()), vec![4]).encode(); - let grandpa_just7 = TestJustification((1, auth.clone()), vec![7]).encode(); - let just4 = Some(Justifications::from((ID, grandpa_just4))); - let just7 = Some(Justifications::from((ID, grandpa_just7.clone()))); - blockchain.insert(header(4).hash(), header(4), just4, None, NewBlockState::Final) .unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final) .unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(7).hash(), header(7), just7, None, NewBlockState::Final).unwrap(); + let (client, backend, blocks) = test_blockchain(8, &[4, 5]); + let block7 = &blocks[6]; + let block8 = &blocks[7]; + + let round = 8; + let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); + let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + client.finalize_block( + BlockId::Number(8), + Some((ID, grandpa_just8.encode().clone())) + ) + .unwrap(); + + // Authority set change at block 8, so the justification stored there will be used in the + // FinalityProof for block 6 let mut authority_set_changes = AuthoritySetChanges::empty(); - 
authority_set_changes.append(0, 4); - authority_set_changes.append(1, 7); + authority_set_changes.append(0, 5); + authority_set_changes.append(1, 8); + + let proof_of_6: FinalityProof = Decode::decode( + &mut &prove_finality( + &*backend, + authority_set_changes.clone(), + 6, + ) + .unwrap() + .unwrap()[..], + ) + .unwrap(); + assert_eq!( + proof_of_6, + FinalityProof { + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![block7.header().clone(), block8.header().clone()], + }, + ); + } + + #[test] + fn finality_proof_in_last_set_fails_without_latest() { + let (_, backend, _) = test_blockchain(8, &[4, 5, 8]); + + // No recent authority set change, so we are in the latest set, and we will try to pickup + // the best stored justification, for which there is none in this case. + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); + + assert!(matches!( + prove_finality(&*backend, authority_set_changes, 6), + Ok(None), + )); + } + + #[test] + fn finality_proof_in_last_set_using_latest_justification_works() { + let (client, backend, blocks) = test_blockchain(8, &[4, 5, 8]); + let block7 = &blocks[6]; + let block8 = &blocks[7]; + + let round = 8; + let commit = create_commit(block8.clone(), round, 1, &[Ed25519Keyring::Alice]); + let grandpa_just8 = GrandpaJustification::from_commit(&client, round, commit).unwrap(); + store_best_justification(&client, &grandpa_just8); + + // No recent authority set change, so we are in the latest set, and will pickup the best + // stored justification + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 5); - let proof_of_5: FinalityProof = Decode::decode( - &mut &prove_finality::<_, _, TestJustification>( - &blockchain, + let proof_of_6: FinalityProof = Decode::decode( + &mut &prove_finality( + &*backend, authority_set_changes, - *header(5).number(), + 6, ) .unwrap() .unwrap()[..], ) .unwrap(); assert_eq!( - 
proof_of_5, + proof_of_6, FinalityProof { - block: header(7).hash(), - justification: grandpa_just7, - unknown_headers: vec![header(6)], + block: block8.hash(), + justification: grandpa_just8.encode(), + unknown_headers: vec![block7.header().clone(), block8.header().clone()], } ); } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 6814d5dfb6195..482859b1f79ef 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -20,13 +20,13 @@ use std::{sync::Arc, collections::HashMap}; use log::debug; use parity_scale_codec::Encode; -use parking_lot::RwLockWriteGuard; use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sc_telemetry::TelemetryHandle; use sp_utils::mpsc::TracingUnboundedSender; use sp_api::TransactionFor; +use sc_consensus::shared_data::{SharedDataLockedUpgradable, SharedDataLocked}; use sp_consensus::{ BlockImport, Error as ConsensusError, @@ -99,7 +99,7 @@ impl JustificationImport let chain_info = self.inner.info(); // request justifications for all pending changes for which change blocks have already been imported - let authorities = self.authority_set.inner().read(); + let authorities = self.authority_set.inner(); for pending_change in authorities.pending_changes() { if pending_change.delay_kind == DelayKind::Finalized && pending_change.effective_number() > chain_info.finalized_number && @@ -157,30 +157,30 @@ impl AppliedChanges { } } -struct PendingSetChanges<'a, Block: 'a + BlockT> { +struct PendingSetChanges { just_in_case: Option<( AuthoritySet>, - RwLockWriteGuard<'a, AuthoritySet>>, + SharedDataLockedUpgradable>>, )>, applied_changes: AppliedChanges>, do_pause: bool, } -impl<'a, Block: 'a + BlockT> PendingSetChanges<'a, Block> { +impl PendingSetChanges { // revert the pending set change explicitly. 
- fn revert(self) { } + fn revert(self) {} fn defuse(mut self) -> (AppliedChanges>, bool) { self.just_in_case = None; - let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + let applied_changes = std::mem::replace(&mut self.applied_changes, AppliedChanges::None); (applied_changes, self.do_pause) } } -impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { +impl Drop for PendingSetChanges { fn drop(&mut self) { if let Some((old_set, mut authorities)) = self.just_in_case.take() { - *authorities = old_set; + *authorities.upgrade() = old_set; } } } @@ -269,33 +269,34 @@ where // when we update the authorities, we need to hold the lock // until the block is written to prevent a race if we need to restore // the old authority set on error or panic. - struct InnerGuard<'a, T: 'a> { - old: Option, - guard: Option>, + struct InnerGuard<'a, H, N> { + old: Option>, + guard: Option>>, } - impl<'a, T: 'a> InnerGuard<'a, T> { - fn as_mut(&mut self) -> &mut T { + impl<'a, H, N> InnerGuard<'a, H, N> { + fn as_mut(&mut self) -> &mut AuthoritySet { &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") } - fn set_old(&mut self, old: T) { + fn set_old(&mut self, old: AuthoritySet) { if self.old.is_none() { // ignore "newer" old changes. 
self.old = Some(old); } } - fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { - if let Some(old) = self.old.take() { - Some((old, self.guard.take().expect("only taken on deconstruction; qed"))) - } else { - None - } + fn consume( + mut self, + ) -> Option<(AuthoritySet, SharedDataLocked<'a, AuthoritySet>)> { + self.old.take().map(|old| ( + old, + self.guard.take().expect("only taken on deconstruction; qed"), + )) } } - impl<'a, T: 'a> Drop for InnerGuard<'a, T> { + impl<'a, H, N> Drop for InnerGuard<'a, H, N> { fn drop(&mut self) { if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { *guard = old; @@ -315,7 +316,7 @@ where let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); let mut guard = InnerGuard { - guard: Some(self.authority_set.inner().write()), + guard: Some(self.authority_set.inner_locked()), old: None, }; @@ -413,10 +414,13 @@ where ); } + let just_in_case = just_in_case.map(|(o, i)| (o, i.release_mutex())); + Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) } } +#[async_trait::async_trait] impl BlockImport for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, @@ -425,11 +429,13 @@ impl BlockImport Client: crate::ClientForGrandpa, for<'a> &'a Client: BlockImport>, + TransactionFor: Send + 'static, + SC: Send, { type Error = ConsensusError; type Transaction = TransactionFor; - fn import_block( + async fn import_block( &mut self, mut block: BlockImportParams, new_cache: HashMap>, @@ -452,7 +458,7 @@ impl BlockImport // we don't want to finalize on `inner.import_block` let mut justifications = block.justifications.take(); - let import_result = (&*self.inner).import_block(block, new_cache); + let import_result = (&*self.inner).import_block(block, new_cache).await; let mut imported_aux = { match import_result { @@ -556,11 +562,11 @@ impl BlockImport Ok(ImportResult::Imported(imported_aux)) } - fn check_block( + async fn check_block( &mut self, block: 
BlockCheckParams, ) -> Result { - self.inner.check_block(block) + self.inner.check_block(block).await } } @@ -580,8 +586,7 @@ impl GrandpaBlockImport GrandpaBlockImport GrandpaJustification { Ok(()) } + + /// The target block number and hash that this justifications proves finality for. + pub fn target(&self) -> (NumberFor, Block::Hash) { + (self.commit.target_number, self.commit.target_hash) + } } /// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index c6c2a39674b8c..e1c3a2c131540 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -122,6 +122,7 @@ mod until_imported; mod voting_rule; pub use authorities::{AuthoritySet, AuthoritySetChanges, SharedAuthoritySet}; +pub use aux_schema::best_justification; pub use finality_proof::{FinalityProof, FinalityProofProvider, FinalityProofError}; pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; pub use import::{find_scheduled_change, find_forced_change, GrandpaBlockImport}; @@ -1019,7 +1020,7 @@ where // set changed (not where the signal happened!) as the base. 
let set_state = VoterSetState::live( new.set_id, - &*self.env.authority_set.inner().read(), + &*self.env.authority_set.inner(), (new.canon_hash, new.canon_number), ); @@ -1133,13 +1134,12 @@ fn local_authority_id( voters: &VoterSet, keystore: Option<&SyncCryptoStorePtr>, ) -> Option { - match keystore { - Some(keystore) => voters - .iter() - .find(|(p, _)| { - SyncCryptoStore::has_keys(&**keystore, &[(p.to_raw_vec(), AuthorityId::ID)]) - }) - .map(|(p, _)| p.clone()), - None => None, - } + keystore.and_then(|keystore| { + voters + .iter() + .find(|(p, _)| { + SyncCryptoStore::has_keys(&**keystore, &[(p.to_raw_vec(), AuthorityId::ID)]) + }) + .map(|(p, _)| p.clone()) + }) } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index c0eab15e4f455..827a7388d6033 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -326,7 +326,7 @@ where // set changed (not where the signal happened!) as the base. let set_state = VoterSetState::live( new.set_id, - &*self.persistent_data.authority_set.inner().read(), + &*self.persistent_data.authority_set.inner(), (new.canon_hash, new.canon_number), ); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d0a6b0874fa77..fa4bd028bfe25 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -28,9 +28,9 @@ use sc_network_test::{ use sc_network::config::ProtocolConfig; use parking_lot::{RwLock, Mutex}; use futures_timer::Delay; +use futures::executor::block_on; use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; -use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; use sp_api::{ApiRef, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; @@ -43,7 +43,9 @@ use sp_runtime::{Justifications, traits::{Block as BlockT, Header as HeaderT}}; use sp_runtime::generic::{BlockId, DigestItem}; use sp_core::H256; use 
sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; -use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof}; +use sp_finality_grandpa::{ + GRANDPA_ENGINE_ID, AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, +}; use authorities::AuthoritySet; use sc_block_builder::BlockBuilderProvider; @@ -54,7 +56,13 @@ use sp_application_crypto::key_types::GRANDPA; type TestLinkHalf = LinkHalf>; type PeerData = Mutex>; -type GrandpaPeer = Peer; +type GrandpaPeer = Peer; +type GrandpaBlockImport = crate::GrandpaBlockImport< + substrate_test_runtime_client::Backend, + Block, + PeersFullClient, + LongestChain +>; struct GrandpaTestNet { peers: Vec, @@ -93,6 +101,7 @@ impl GrandpaTestNet { impl TestNetFactory for GrandpaTestNet { type Verifier = PassThroughVerifier; type PeerData = PeerData; + type BlockImport = GrandpaBlockImport; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { @@ -124,9 +133,9 @@ impl TestNetFactory for GrandpaTestNet { PassThroughVerifier::new(false) // use non-instant finality. } - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, PeerData, ) @@ -141,7 +150,7 @@ impl TestNetFactory for GrandpaTestNet { ).expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); ( - BlockImportAdapter::new_full(import), + BlockImportAdapter::new(import), Some(justification_import), Mutex::new(Some(link)), ) @@ -442,10 +451,19 @@ fn finalize_3_voters_1_full_observer() { } // wait for all finalized on each. 
- let wait_for = futures::future::join_all(finality_notifications) - .map(|_| ()); + let wait_for = futures::future::join_all(finality_notifications).map(|_| ()); block_until_complete(wait_for, &net, &mut runtime); + + // all peers should have stored the justification for the best finalized block #20 + for peer_id in 0..4 { + let client = net.lock().peers[peer_id].client().as_full().unwrap(); + let justification = crate::aux_schema::best_justification::<_, Block>(&*client) + .unwrap() + .unwrap(); + + assert_eq!(justification.commit.target_number, 20); + } } #[test] @@ -820,11 +838,7 @@ fn allows_reimporting_change_blocks() { let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >( - client.clone(), - ); + let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_full().unwrap(); let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -844,7 +858,7 @@ fn allows_reimporting_change_blocks() { }; assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, @@ -855,7 +869,7 @@ fn allows_reimporting_change_blocks() { ); assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); } @@ -869,11 +883,7 @@ fn test_bad_justification() { let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >( - client.clone(), - ); + let (mut block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -895,7 +905,7 @@ fn test_bad_justification() { }; assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, @@ -906,7 +916,7 @@ fn test_bad_justification() { ); assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), + block_on(block_import.import_block(block(), HashMap::new())).unwrap(), ImportResult::AlreadyInChain ); } @@ -950,9 +960,7 @@ fn voter_persists_its_votes() { let set_state = { let bob_client = net.peer(1).client().clone(); let (_, _, link) = net - .make_block_import::< - TransactionFor - >(bob_client); + .make_block_import(bob_client); let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); let PersistentData { set_state, .. 
} = persistent_data; set_state @@ -1019,9 +1027,7 @@ fn voter_persists_its_votes() { let alice_client = net.peer(0).client().clone(); let (_block_import, _, link) = net - .make_block_import::< - TransactionFor - >(alice_client); + .make_block_import(alice_client); let link = link.lock().take().unwrap(); let grandpa_params = GrandpaParams { @@ -1422,7 +1428,7 @@ fn grandpa_environment_respects_voting_rules() { // the unrestricted environment should just return the best block assert_eq!( - futures::executor::block_on(unrestricted_env.best_chain_containing( + block_on(unrestricted_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 21, @@ -1431,14 +1437,14 @@ fn grandpa_environment_respects_voting_rules() { // both the other environments should return block 16, which is 3/4 of the // way in the unfinalized chain assert_eq!( - futures::executor::block_on(three_quarters_env.best_chain_containing( + block_on(three_quarters_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 16, ); assert_eq!( - futures::executor::block_on(default_env.best_chain_containing( + block_on(default_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 16, @@ -1449,7 +1455,7 @@ fn grandpa_environment_respects_voting_rules() { // the 3/4 environment should propose block 21 for voting assert_eq!( - futures::executor::block_on(three_quarters_env.best_chain_containing( + block_on(three_quarters_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 21, @@ -1458,7 +1464,7 @@ fn grandpa_environment_respects_voting_rules() { // while the default environment will always still make sure we don't vote // on the best block (2 behind) assert_eq!( - futures::executor::block_on(default_env.best_chain_containing( + block_on(default_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 19, @@ -1471,7 +1477,7 @@ fn 
grandpa_environment_respects_voting_rules() { // best block, there's a hard rule that we can't cast any votes lower than // the given base (#21). assert_eq!( - futures::executor::block_on(default_env.best_chain_containing( + block_on(default_env.best_chain_containing( peer.client().info().finalized_hash )).unwrap().unwrap().1, 21, @@ -1557,9 +1563,7 @@ fn imports_justification_for_regular_blocks_on_import() { let mut net = GrandpaTestNet::new(api.clone(), 1, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >(client.clone()); + let (mut block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_full().expect("only full clients are used in test"); let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1607,7 +1611,7 @@ fn imports_justification_for_regular_blocks_on_import() { import.fork_choice = Some(ForkChoiceStrategy::LongestChain); assert_eq!( - block_import.import_block(import, HashMap::new()).unwrap(), + block_on(block_import.import_block(import, HashMap::new())).unwrap(), ImportResult::Imported(ImportedAux { needs_justification: false, clear_justification_requests: false, diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 9b3fb9b328560..3ede7649a1387 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -372,7 +372,7 @@ mod tests { .unwrap() .block; - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } let genesis = client diff --git a/client/light/src/fetcher.rs b/client/light/src/fetcher.rs index b71c4871803da..e39cfe07fbf5e 100644 --- a/client/light/src/fetcher.rs +++ b/client/light/src/fetcher.rs @@ -330,10 +330,7 @@ impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a 
self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() } else { let index: Option = block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); - match index { - Some(index) => self.roots.1.get(index as usize).cloned(), - None => None, - } + index.and_then(|index| self.roots.1.get(index as usize).cloned()) }; Ok(root.map(|root| { diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index a72e65ab3f572..fc5fb9a29ce96 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.36.0", default-features = false } +libp2p = { version = "0.37.1", default-features = false } log = "0.4.8" lru = "0.6.5" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 604165d10074d..3740ebceb6389 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -63,17 +63,17 @@ wasm-timer = "0.2" zeroize = "1.2.0" [dependencies.libp2p] -version = "0.36.0" +version = "0.37.1" [target.'cfg(target_os = "unknown")'.dependencies.libp2p] -version = "0.36.0" +version = "0.37.1" default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "request-response", "tcp-async-io", "websocket", "yamux"] [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.36.0", default-features = false } +libp2p = { version = "0.37.1", default-features = false } quickcheck = "1.0.3" rand = "0.7.2" sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } diff --git a/client/network/src/block_request_handler.rs b/client/network/src/block_request_handler.rs index 2cc888c220f62..633b6b5935edc 100644 --- a/client/network/src/block_request_handler.rs +++ b/client/network/src/block_request_handler.rs @@ -80,6 
+80,7 @@ struct SeenRequestsKey { max_blocks: usize, direction: Direction, attributes: BlockAttributes, + support_multiple_justifications: bool, } impl Hash for SeenRequestsKey { @@ -121,14 +122,9 @@ impl BlockRequestHandler { client: Arc>, num_peer_hint: usize, ) -> (Self, ProtocolConfig) { - // Rate of arrival multiplied with the waiting time in the queue equals the queue length. - // - // An average Polkadot node serves less than 5 requests per second. The 95th percentile - // serving a request is less than 2 second. Thus one would estimate the queue length to be - // below 10. - // - // Choosing 20 as the queue length to give some additional buffer. - let (tx, request_receiver) = mpsc::channel(20); + // Reserve enough request slots for one request per peer when we are at the maximum + // number of peers. + let (tx, request_receiver) = mpsc::channel(num_peer_hint); let mut protocol_config = generate_protocol_config(protocol_id); protocol_config.inbound_queue = Some(tx); @@ -185,12 +181,15 @@ impl BlockRequestHandler { let attributes = BlockAttributes::from_be_u32(request.fields)?; + let support_multiple_justifications = request.support_multiple_justifications; + let key = SeenRequestsKey { peer: *peer, max_blocks, direction, from: from_block_id.clone(), attributes, + support_multiple_justifications, }; let mut reputation_changes = Vec::new(); @@ -226,6 +225,7 @@ impl BlockRequestHandler { from_block_id, direction, max_blocks, + support_multiple_justifications, )?; // If any of the blocks contains nay data, we can consider it as successful request. 
@@ -254,6 +254,7 @@ impl BlockRequestHandler { pending_response.send(OutgoingResponse { result, reputation_changes, + sent_feedback: None, }).map_err(|_| HandleRequestError::SendResponse) } @@ -263,6 +264,7 @@ impl BlockRequestHandler { mut block_id: BlockId, direction: Direction, max_blocks: usize, + support_multiple_justifications: bool, ) -> Result { let get_header = attributes.contains(BlockAttributes::HEADER); let get_body = attributes.contains(BlockAttributes::BODY); @@ -281,22 +283,33 @@ impl BlockRequestHandler { None }; - // TODO: In a follow up PR tracked by https://github.com/paritytech/substrate/issues/8172 - // we want to send/receive all justifications. - // For now we keep compatibility by selecting precisely the GRANDPA one, and not just - // the first one. When sending we could have just taken the first one, since we don't - // expect there to be any other kind currently, but when receiving we need to add the - // engine ID tag. - // The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and will be - // removed when resolving the above issue. - let justification = justifications.and_then(|just| just.into_justification(*b"FRNK")); - - let is_empty_justification = justification - .as_ref() - .map(|j| j.is_empty()) - .unwrap_or(false); - - let justification = justification.unwrap_or_default(); + let (justifications, justification, is_empty_justification) = + if support_multiple_justifications { + let justifications = match justifications { + Some(v) => v.encode(), + None => Vec::new(), + }; + (justifications, Vec::new(), false) + } else { + // For now we keep compatibility by selecting precisely the GRANDPA one, and not just + // the first one. When sending we could have just taken the first one, since we don't + // expect there to be any other kind currently, but when receiving we need to add the + // engine ID tag. 
+ // The ID tag is hardcoded here to avoid depending on the GRANDPA crate, and will be + // removed once we remove the backwards compatibility. + // See: https://github.com/paritytech/substrate/issues/8172 + let justification = + justifications.and_then(|just| just.into_justification(*b"FRNK")); + + let is_empty_justification = justification + .as_ref() + .map(|j| j.is_empty()) + .unwrap_or(false); + + let justification = justification.unwrap_or_default(); + + (Vec::new(), justification, is_empty_justification) + }; let body = if get_body { match self.client.block_body(&BlockId::Hash(hash))? { @@ -324,6 +337,7 @@ impl BlockRequestHandler { message_queue: Vec::new(), justification, is_empty_justification, + justifications, }; total_size += block_data.body.len(); diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index b7c791e392676..f6273c9fb3e02 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -573,9 +573,19 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + let with_peer_id = addr.clone() + .with(Protocol::P2p(self.local_peer_id.clone().into())); + self.known_external_addresses.remove(&with_peer_id); + + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_expired_external_addr(k, addr) + } + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_expired_listen_addr(k, addr) + NetworkBehaviour::inject_expired_listen_addr(k, id, addr) } } @@ -585,9 +595,15 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_new_listener(&mut self, id: ListenerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_listener(k, id) + } + } + + fn inject_new_listen_addr(&mut self, id: 
ListenerId, addr: &Multiaddr) { for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_listen_addr(k, addr) + NetworkBehaviour::inject_new_listen_addr(k, id, addr) } } @@ -892,7 +908,7 @@ mod tests { first_swarm_peer_id_and_addr = Some((keypair.public().into_peer_id(), listen_addr.clone())) } - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + swarm.listen_on(listen_addr.clone()).unwrap(); (swarm, listen_addr) }).collect::>(); @@ -915,13 +931,13 @@ mod tests { DiscoveryOut::UnroutablePeer(other) | DiscoveryOut::Discovered(other) => { // Call `add_self_reported_address` to simulate identify happening. let addr = swarms.iter().find_map(|(s, a)| - if s.local_peer_id == other { + if s.behaviour().local_peer_id == other { Some(a.clone()) } else { None }) .unwrap(); - swarms[swarm_n].0.add_self_reported_address( + swarms[swarm_n].0.behaviour_mut().add_self_reported_address( &other, [protocol_name_from_protocol_id(&protocol_id)].iter(), addr, diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index cd637f162721e..b000cf575ddb3 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -47,8 +47,10 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) #[derive(Clone)] struct PassThroughVerifier(bool); + + #[async_trait::async_trait] impl sp_consensus::import_queue::Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, origin: sp_consensus::BlockOrigin, header: B::Header, diff --git a/client/network/src/light_client_requests/handler.rs b/client/network/src/light_client_requests/handler.rs index fe0a3cb187d55..c0932a466418b 100644 --- a/client/network/src/light_client_requests/handler.rs +++ b/client/network/src/light_client_requests/handler.rs @@ -48,7 +48,7 @@ use std::{ collections::{BTreeMap}, sync::Arc, }; -use log::debug; +use log::{trace, debug}; const LOG_TARGET: &str = "light-client-request-handler"; @@ -82,9 +82,14 @@ impl 
LightClientRequestHandler { match self.handle_request(peer, payload) { Ok(response_data) => { - let response = OutgoingResponse { result: Ok(response_data), reputation_changes: Vec::new() }; + let response = OutgoingResponse { + result: Ok(response_data), + reputation_changes: Vec::new(), + sent_feedback: None + }; + match pending_response.send(response) { - Ok(()) => debug!( + Ok(()) => trace!( target: LOG_TARGET, "Handled light client request from {}.", peer, @@ -110,7 +115,12 @@ impl LightClientRequestHandler { _ => Vec::new(), }; - let response = OutgoingResponse { result: Err(()), reputation_changes }; + let response = OutgoingResponse { + result: Err(()), + reputation_changes, + sent_feedback: None + }; + if pending_response.send(response).is_err() { debug!( target: LOG_TARGET, diff --git a/client/network/src/light_client_requests/sender.rs b/client/network/src/light_client_requests/sender.rs index 652f465d6f250..bf832ea13aedf 100644 --- a/client/network/src/light_client_requests/sender.rs +++ b/client/network/src/light_client_requests/sender.rs @@ -722,6 +722,7 @@ impl Request { to_block: Default::default(), direction: schema::v1::Direction::Ascending as i32, max_blocks: 1, + support_multiple_justifications: true, }; let mut buf = Vec::with_capacity(rq.encoded_len()); diff --git a/client/network/src/peer_info.rs b/client/network/src/peer_info.rs index 28b913ea40192..39bbd1d870460 100644 --- a/client/network/src/peer_info.rs +++ b/client/network/src/peer_info.rs @@ -23,7 +23,7 @@ use libp2p::core::connection::{ConnectionId, ListenerId}; use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use libp2p::identify::{Identify, IdentifyEvent, IdentifyInfo}; +use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent, IdentifyInfo}; use libp2p::ping::{Ping, PingConfig, 
PingEvent, PingSuccess}; use log::{debug, trace, error}; use smallvec::SmallVec; @@ -86,8 +86,9 @@ impl PeerInfoBehaviour { local_public_key: PublicKey, ) -> Self { let identify = { - let proto_version = "/substrate/1.0".to_string(); - Identify::new(proto_version, user_agent, local_public_key) + let cfg = IdentifyConfig::new("/substrate/1.0".to_string(), local_public_key) + .with_agent_version(user_agent); + Identify::new(cfg) }; PeerInfoBehaviour { @@ -137,13 +138,15 @@ pub struct Node<'a>(&'a NodeInfo); impl<'a> Node<'a> { /// Returns the endpoint of an established connection to the peer. - pub fn endpoint(&self) -> &'a ConnectedPoint { - &self.0.endpoints[0] // `endpoints` are non-empty by definition + /// + /// Returns `None` if we are disconnected from the node. + pub fn endpoint(&self) -> Option<&'a ConnectedPoint> { + self.0.endpoints.get(0) } /// Returns the latest version information we know of. pub fn client_version(&self) -> Option<&'a str> { - self.0.client_version.as_ref().map(|s| &s[..]) + self.0.client_version.as_deref() } /// Returns the latest ping time we know of for this node. 
`None` if we never successfully @@ -253,14 +256,19 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_dial_failure(peer_id); } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_new_listen_addr(addr); - self.identify.inject_new_listen_addr(addr); + fn inject_new_listener(&mut self, id: ListenerId) { + self.ping.inject_new_listener(id); + self.identify.inject_new_listener(id); + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.ping.inject_new_listen_addr(id, addr); + self.identify.inject_new_listen_addr(id, addr); } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_expired_listen_addr(addr); - self.identify.inject_expired_listen_addr(addr); + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.ping.inject_expired_listen_addr(id, addr); + self.identify.inject_expired_listen_addr(id, addr); } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { @@ -268,6 +276,11 @@ impl NetworkBehaviour for PeerInfoBehaviour { self.identify.inject_new_external_addr(addr); } + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_expired_external_addr(addr); + self.identify.inject_expired_external_addr(addr); + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { self.ping.inject_listener_error(id, err); self.identify.inject_listener_error(id, err); @@ -323,6 +336,7 @@ impl NetworkBehaviour for PeerInfoBehaviour { } IdentifyEvent::Error { peer_id, error } => debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error), + IdentifyEvent::Pushed { .. } => {} IdentifyEvent::Sent { .. 
} => {} } }, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 84b5285b38ada..ff64b9d599c04 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -478,7 +478,7 @@ impl Protocol { /// Inform sync about new best imported block. pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - trace!(target: "sync", "New best block imported {:?}/#{}", hash, number); + debug!(target: "sync", "New best block imported {:?}/#{}", hash, number); self.sync.update_chain_info(&hash, number); @@ -522,11 +522,13 @@ impl Protocol { if self.important_peers.contains(&peer) { warn!(target: "sync", "Reserved peer {} disconnected", peer); } else { - trace!(target: "sync", "{} disconnected", peer); + debug!(target: "sync", "{} disconnected", peer); } if let Some(_peer_data) = self.peers.remove(&peer) { - self.sync.peer_disconnected(&peer); + if let Some(sync::OnBlockData::Import(origin, blocks)) = self.sync.peer_disconnected(&peer) { + self.pending_messages.push_back(CustomMessageOutcome::BlockImport(origin, blocks)); + } Ok(()) } else { Err(()) @@ -578,6 +580,11 @@ impl Protocol { } else { None }, + justifications: if !block_data.justifications.is_empty() { + Some(DecodeAll::decode_all(&mut block_data.justifications.as_ref())?) 
+ } else { + None + }, }) }).collect::, codec::Error>>(); @@ -906,6 +913,7 @@ impl Protocol { receipt: None, message_queue: None, justification: None, + justifications: None, }, ], }, @@ -1121,6 +1129,7 @@ fn prepare_block_request( to_block: request.to.map(|h| h.encode()).unwrap_or_default(), direction: request.direction as i32, max_blocks: request.max.unwrap_or(0), + support_multiple_justifications: true, }; CustomMessageOutcome::BlockRequest { @@ -1230,7 +1239,7 @@ impl NetworkBehaviour for Protocol { let protobuf_response = match crate::schema::v1::BlockResponse::decode(&resp[..]) { Ok(proto) => proto, Err(e) => { - trace!(target: "sync", "Failed to decode block request to peer {:?}: {:?}.", id, e); + debug!(target: "sync", "Failed to decode block request to peer {:?}: {:?}.", id, e); self.peerset_handle.report_peer(id.clone(), rep::BAD_MESSAGE); self.behaviour.disconnect_peer(id, HARDCODED_PEERSETS_SYNC); continue; @@ -1241,7 +1250,7 @@ impl NetworkBehaviour for Protocol { }, Poll::Ready(Ok(Err(e))) => { peer.block_request.take(); - trace!(target: "sync", "Block request to peer {:?} failed: {:?}.", id, e); + debug!(target: "sync", "Block request to peer {:?} failed: {:?}.", id, e); match e { RequestFailure::Network(OutboundFailure::Timeout) => { @@ -1438,7 +1447,7 @@ impl NetworkBehaviour for Protocol { if self.on_sync_peer_disconnected(peer_id.clone()).is_ok() { CustomMessageOutcome::SyncDisconnected(peer_id) } else { - log::debug!( + log::trace!( target: "sync", "Disconnected peer which had earlier been refused by on_sync_peer_connected {}", peer_id @@ -1476,7 +1485,7 @@ impl NetworkBehaviour for Protocol { } } HARDCODED_PEERSETS_SYNC => { - debug!( + trace!( target: "sync", "Received sync for peer earlier refused by sync layer: {}", peer_id @@ -1521,18 +1530,26 @@ impl NetworkBehaviour for Protocol { self.behaviour.inject_dial_failure(peer_id) } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_new_listen_addr(addr) + fn 
inject_new_listener(&mut self, id: ListenerId) { + self.behaviour.inject_new_listener(id) } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_expired_listen_addr(addr) + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.behaviour.inject_new_listen_addr(id, addr) + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.behaviour.inject_expired_listen_addr(id, addr) } fn inject_new_external_addr(&mut self, addr: &Multiaddr) { self.behaviour.inject_new_external_addr(addr) } + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_expired_external_addr(addr) + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { self.behaviour.inject_listener_error(id, err); } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 01e9a5d7215af..dc6beac99aa01 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -144,11 +144,31 @@ pub struct RemoteReadResponse { pub proof: StorageProof, } +/// Announcement summary used for debug logging. +#[derive(Debug)] +pub struct AnnouncementSummary { + block_hash: H::Hash, + number: H::Number, + parent_hash: H::Hash, + state: Option, +} + +impl generic::BlockAnnounce { + pub fn summary(&self) -> AnnouncementSummary { + AnnouncementSummary { + block_hash: self.header.hash(), + number: *self.header.number(), + parent_hash: self.header.parent_hash().clone(), + state: self.state, + } + } +} + /// Generic types. 
pub mod generic { use bitflags::bitflags; use codec::{Encode, Decode, Input, Output}; - use sp_runtime::EncodedJustification; + use sp_runtime::{EncodedJustification, Justifications}; use super::{ RemoteReadResponse, Transactions, Direction, RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId, @@ -234,6 +254,8 @@ pub mod generic { pub message_queue: Option>, /// Justification if requested. pub justification: Option, + /// Justifications if requested. + pub justifications: Option, } /// Identifies starting point of a block sequence. diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 08c4ec5d4f7b3..6b17c5253f364 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -31,7 +31,7 @@ use libp2p::swarm::{ NotifyHandler, PollParameters }; -use log::{debug, error, trace, warn}; +use log::{error, trace, warn}; use parking_lot::RwLock; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; @@ -409,7 +409,7 @@ impl Notifications { /// Disconnects the given peer if we are connected to it. 
pub fn disconnect_peer(&mut self, peer_id: &PeerId, set_id: sc_peerset::SetId) { - debug!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "External API => Disconnect({}, {:?})", peer_id, set_id); self.disconnect_peer_inner(peer_id, set_id, None); } @@ -440,7 +440,7 @@ impl Notifications { timer_deadline, timer: _ } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); let backoff_until = Some(if let Some(ban) = ban { cmp::max(timer_deadline, Instant::now() + ban) @@ -457,11 +457,11 @@ impl Notifications { // All open or opening connections are sent a `Close` message. // If relevant, the external API is instantly notified. PeerState::Enabled { mut connections } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id); let event = NotificationsOut::CustomProtocolClosed { peer_id: peer_id.clone(), set_id, @@ -472,7 +472,7 @@ impl Notifications { for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*connec_id), @@ -484,7 +484,7 @@ impl 
Notifications { for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::Opening)) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*connec_id), @@ -520,7 +520,7 @@ impl Notifications { for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*connec_id), @@ -573,7 +573,7 @@ impl Notifications { ) { let notifs_sink = match self.peers.get(&(target.clone(), set_id)).and_then(|p| p.get_open()) { None => { - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Tried to sent notification to {:?} without an open channel.", target); return @@ -607,9 +607,9 @@ impl Notifications { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { // If there's no entry in `self.peers`, start dialing. 
- debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Starting to connect", + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Starting to connect", entry.key().0, set_id); - debug!(target: "sub-libp2p", "Libp2p <= Dial {}", entry.key().0); + trace!(target: "sub-libp2p", "Libp2p <= Dial {}", entry.key().0); // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id: entry.key().0.clone(), @@ -626,7 +626,7 @@ impl Notifications { // Backoff (not expired) => PendingRequest PeerState::Backoff { ref timer, ref timer_deadline } if *timer_deadline > now => { let peer_id = occ_entry.key().0.clone(); - debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Will start to connect at \ + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Will start to connect at \ until {:?}", peer_id, set_id, timer_deadline); *occ_entry.into_mut() = PeerState::PendingRequest { timer: *timer, @@ -636,9 +636,9 @@ impl Notifications { // Backoff (expired) => Requested PeerState::Backoff { .. 
} => { - debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Starting to connect", + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Starting to connect", occ_entry.key().0, set_id); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); + trace!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id: occ_entry.key().0.clone(), @@ -653,7 +653,7 @@ impl Notifications { backoff_until: Some(ref backoff) } if *backoff > now => { let peer_id = occ_entry.key().0.clone(); - debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): But peer is backed-off until {:?}", + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): But peer is backed-off until {:?}", peer_id, set_id, backoff); let delay_id = self.next_delay_id; @@ -681,9 +681,9 @@ impl Notifications { if let Some((connec_id, connec_state)) = connections.iter_mut() .find(|(_, s)| matches!(s, ConnectionState::Closed)) { - debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", occ_entry.key().0, set_id); - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id); + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*connec_id), @@ -697,7 +697,7 @@ impl Notifications { debug_assert!(connections.iter().any(|(_, s)| { matches!(s, ConnectionState::OpeningThenClosing | ConnectionState::Closing) })); - debug!( + trace!( target: "sub-libp2p", "PSM => Connect({}, {:?}): No connection in proper state. Delaying.", occ_entry.key().0, set_id @@ -731,7 +731,7 @@ impl Notifications { // Incoming => Enabled PeerState::Incoming { mut connections, .. 
} => { - debug!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", + trace!(target: "sub-libp2p", "PSM => Connect({}, {:?}): Enabling connections.", occ_entry.key().0, set_id); if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == occ_entry.key().0 && i.set_id == set_id && i.alive) { @@ -745,7 +745,7 @@ impl Notifications { for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", occ_entry.key(), *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: occ_entry.key().0.clone(), @@ -793,7 +793,7 @@ impl Notifications { let mut entry = match self.peers.entry((peer_id, set_id)) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { - debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", entry.key().0, set_id); return } @@ -801,7 +801,7 @@ impl Notifications { match mem::replace(entry.get_mut(), PeerState::Poisoned) { st @ PeerState::Disabled { .. } | st @ PeerState::Backoff { .. 
} => { - debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Already disabled.", entry.key().0, set_id); *entry.into_mut() = st; }, @@ -809,7 +809,7 @@ impl Notifications { // DisabledPendingEnable => Disabled PeerState::DisabledPendingEnable { connections, timer_deadline, timer: _ } => { debug_assert!(!connections.is_empty()); - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Interrupting pending enabling.", entry.key().0, set_id); *entry.into_mut() = PeerState::Disabled { @@ -820,14 +820,14 @@ impl Notifications { // Enabled => Disabled PeerState::Enabled { mut connections } => { - debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.", + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Disabling connections.", entry.key().0, set_id); debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening | ConnectionState::Open(_)))); if connections.iter().any(|(_, s)| matches!(s, ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id); + trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", entry.key().0, set_id); let event = NotificationsOut::CustomProtocolClosed { peer_id: entry.key().0.clone(), set_id, @@ -838,7 +838,7 @@ impl Notifications { for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::Opening)) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", entry.key(), *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: entry.key().0.clone(), @@ -851,7 +851,7 @@ impl Notifications { for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + 
trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", entry.key(), *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: entry.key().0.clone(), @@ -869,14 +869,14 @@ impl Notifications { // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other // sub-systems (such as the discovery mechanism) may require dialing this peer as // well at the same time. - debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected.", + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected.", entry.key().0, set_id); entry.remove(); }, // PendingRequest => Backoff PeerState::PendingRequest { timer, timer_deadline } => { - debug!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected", + trace!(target: "sub-libp2p", "PSM => Drop({}, {:?}): Not yet connected", entry.key().0, set_id); *entry.into_mut() = PeerState::Backoff { timer, timer_deadline } }, @@ -906,13 +906,13 @@ impl Notifications { }; if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", + trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Obsolete incoming", index, incoming.peer_id, incoming.set_id); match self.peers.get_mut(&(incoming.peer_id.clone(), incoming.set_id)) { Some(PeerState::DisabledPendingEnable { .. }) | Some(PeerState::Enabled { .. }) => {} _ => { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", incoming.peer_id, incoming.set_id); self.peerset.dropped(incoming.set_id, incoming.peer_id, sc_peerset::DropReason::Unknown); }, @@ -931,14 +931,14 @@ impl Notifications { match mem::replace(state, PeerState::Poisoned) { // Incoming => Enabled PeerState::Incoming { mut connections, .. 
} => { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", + trace!(target: "sub-libp2p", "PSM => Accept({:?}, {}, {:?}): Enabling connections.", index, incoming.peer_id, incoming.set_id); debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", incoming.peer_id, *connec_id, incoming.set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: incoming.peer_id.clone(), @@ -971,7 +971,7 @@ impl Notifications { }; if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ + trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Obsolete incoming, \ ignoring", index, incoming.peer_id, incoming.set_id); return } @@ -987,14 +987,14 @@ impl Notifications { match mem::replace(state, PeerState::Poisoned) { // Incoming => Disabled PeerState::Incoming { mut connections, backoff_until } => { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", + trace!(target: "sub-libp2p", "PSM => Reject({:?}, {}, {:?}): Rejecting connections.", index, incoming.peer_id, incoming.set_id); debug_assert!(connections.iter().any(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote))); for (connec_id, connec_state) in connections.iter_mut() .filter(|(_, s)| matches!(s, ConnectionState::OpenDesiredByRemote)) { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Close({:?})", incoming.peer_id, connec_id, incoming.set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: incoming.peer_id.clone(), @@ -1034,11 +1034,11 @@ impl NetworkBehaviour for Notifications { // 
Requested | PendingRequest => Enabled st @ &mut PeerState::Requested | st @ &mut PeerState::PendingRequest { .. } => { - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}): Connection was requested by PSM.", peer_id, set_id, endpoint ); - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *conn, set_id); + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", peer_id, *conn, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), handler: NotifyHandler::One(*conn), @@ -1059,7 +1059,7 @@ impl NetworkBehaviour for Notifications { } else { None }; - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}, {:?}): Not requested by PSM, disabling.", peer_id, set_id, endpoint, *conn); @@ -1074,7 +1074,7 @@ impl NetworkBehaviour for Notifications { PeerState::Disabled { connections, .. } | PeerState::DisabledPendingEnable { connections, .. } | PeerState::Enabled { connections, .. } => { - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}, {:?}, {:?}): Secondary connection. 
Leaving closed.", peer_id, set_id, endpoint, *conn); connections.push((*conn, ConnectionState::Closed)); @@ -1096,7 +1096,7 @@ impl NetworkBehaviour for Notifications { match mem::replace(entry.get_mut(), PeerState::Poisoned) { // Disabled => Disabled | Backoff | Ø PeerState::Disabled { mut connections, backoff_until } => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.", + trace!(target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled.", peer_id, set_id, *conn); if let Some(pos) = connections.iter().position(|(c, _)| *c == *conn) { @@ -1137,7 +1137,7 @@ impl NetworkBehaviour for Notifications { // DisabledPendingEnable => DisabledPendingEnable | Backoff PeerState::DisabledPendingEnable { mut connections, timer_deadline, timer } => { - debug!( + trace!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Disabled but pending enable.", peer_id, set_id, *conn @@ -1152,7 +1152,7 @@ impl NetworkBehaviour for Notifications { } if connections.is_empty() { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); *entry.get_mut() = PeerState::Backoff { timer, timer_deadline }; @@ -1165,7 +1165,7 @@ impl NetworkBehaviour for Notifications { // Incoming => Incoming | Disabled | Backoff | Ø PeerState::Incoming { mut connections, backoff_until } => { - debug!( + trace!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): OpenDesiredByRemote.", peer_id, set_id, *conn @@ -1236,7 +1236,7 @@ impl NetworkBehaviour for Notifications { // Enabled => Enabled | Backoff // Peers are always backed-off when disconnecting while Enabled. 
PeerState::Enabled { mut connections } => { - debug!( + trace!( target: "sub-libp2p", "Libp2p => Disconnected({}, {:?}, {:?}): Enabled.", peer_id, set_id, *conn @@ -1260,7 +1260,7 @@ impl NetworkBehaviour for Notifications { .next() { if pos <= replacement_pos { - debug!( + trace!( target: "sub-libp2p", "External API <= Sink replaced({}, {:?})", peer_id, set_id @@ -1273,7 +1273,7 @@ impl NetworkBehaviour for Notifications { self.events.push_back(NetworkBehaviourAction::GenerateEvent(event)); } } else { - debug!( + trace!( target: "sub-libp2p", "External API <= Closed({}, {:?})", peer_id, set_id ); @@ -1292,7 +1292,7 @@ impl NetworkBehaviour for Notifications { } if connections.is_empty() { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); @@ -1313,7 +1313,7 @@ impl NetworkBehaviour for Notifications { } else if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); *entry.get_mut() = PeerState::Disabled { @@ -1351,7 +1351,7 @@ impl NetworkBehaviour for Notifications { } fn inject_dial_failure(&mut self, peer_id: &PeerId) { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); for set_id in (0..self.notif_protocols.len()).map(sc_peerset::SetId::from) { if let Entry::Occupied(mut entry) = self.peers.entry((peer_id.clone(), set_id)) { @@ -1364,7 +1364,7 @@ impl NetworkBehaviour for Notifications { // "Basic" situation: we failed to reach a peer that 
the peerset requested. st @ PeerState::Requested | st @ PeerState::PendingRequest { .. } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", peer_id, set_id); self.peerset.dropped(set_id, peer_id.clone(), sc_peerset::DropReason::Unknown); let now = Instant::now(); @@ -1415,7 +1415,7 @@ impl NetworkBehaviour for Notifications { NotifsHandlerOut::OpenDesiredByRemote { protocol_index } => { let set_id = sc_peerset::SetId::from(protocol_index); - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}]) => OpenDesiredByRemote({:?})", source, connection, set_id); @@ -1463,7 +1463,7 @@ impl NetworkBehaviour for Notifications { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", source, connection, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: source, @@ -1503,7 +1503,7 @@ impl NetworkBehaviour for Notifications { let incoming_id = self.next_incoming_index; self.next_incoming_index.0 += 1; - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + trace!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", source, incoming_id); self.peerset.incoming(set_id, source.clone(), incoming_id); self.incoming.push(IncomingPeer { @@ -1539,7 +1539,7 @@ impl NetworkBehaviour for Notifications { PeerState::DisabledPendingEnable { mut connections, timer, timer_deadline } => { if let Some((_, connec_state)) = connections.iter_mut().find(|(c, _)| *c == connection) { if let ConnectionState::Closed = *connec_state { - debug!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) <= Open({:?})", source, connection, set_id); 
self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: source.clone(), @@ -1587,7 +1587,7 @@ impl NetworkBehaviour for Notifications { NotifsHandlerOut::CloseDesired { protocol_index } => { let set_id = sc_peerset::SetId::from(protocol_index); - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Handler({}, {:?}) => CloseDesired({:?})", source, connection, set_id); @@ -1622,7 +1622,7 @@ impl NetworkBehaviour for Notifications { debug_assert!(matches!(connections[pos].1, ConnectionState::Open(_))); connections[pos].1 = ConnectionState::Closing; - debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Close({:?})", source, connection, set_id); + trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Close({:?})", source, connection, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: source.clone(), handler: NotifyHandler::One(connection), @@ -1641,7 +1641,7 @@ impl NetworkBehaviour for Notifications { .next() { if pos <= replacement_pos { - debug!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source); + trace!(target: "sub-libp2p", "External API <= Sink replaced({:?})", source); let event = NotificationsOut::CustomProtocolReplaced { peer_id: source, set_id, @@ -1655,7 +1655,7 @@ impl NetworkBehaviour for Notifications { } else { // List of open connections wasn't empty before but now it is. 
if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening)) { - debug!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", source, set_id); + trace!(target: "sub-libp2p", "PSM <= Dropped({}, {:?})", source, set_id); self.peerset.dropped(set_id, source.clone(), sc_peerset::DropReason::Refused); *entry.into_mut() = PeerState::Disabled { connections, backoff_until: None @@ -1664,7 +1664,7 @@ impl NetworkBehaviour for Notifications { *entry.into_mut() = PeerState::Enabled { connections }; } - debug!(target: "sub-libp2p", "External API <= Closed({}, {:?})", source, set_id); + trace!(target: "sub-libp2p", "External API <= Closed({}, {:?})", source, set_id); let event = NotificationsOut::CustomProtocolClosed { peer_id: source, set_id, @@ -1692,7 +1692,7 @@ impl NetworkBehaviour for Notifications { NotifsHandlerOut::CloseResult { protocol_index } => { let set_id = sc_peerset::SetId::from(protocol_index); - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Handler({}, {:?}) => CloseResult({:?})", source, connection, set_id); @@ -1724,7 +1724,7 @@ impl NetworkBehaviour for Notifications { NotifsHandlerOut::OpenResultOk { protocol_index, received_handshake, notifications_sink, .. 
} => { let set_id = sc_peerset::SetId::from(protocol_index); - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Handler({}, {:?}) => OpenResultOk({:?})", source, connection, set_id); @@ -1738,7 +1738,7 @@ impl NetworkBehaviour for Notifications { *c == connection && matches!(s, ConnectionState::Opening)) { if !any_open { - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + trace!(target: "sub-libp2p", "External API <= Open({:?})", source); let event = NotificationsOut::CustomProtocolOpen { peer_id: source, set_id, @@ -1785,7 +1785,7 @@ impl NetworkBehaviour for Notifications { NotifsHandlerOut::OpenResultErr { protocol_index } => { let set_id = sc_peerset::SetId::from(protocol_index); - debug!(target: "sub-libp2p", + trace!(target: "sub-libp2p", "Handler({:?}, {:?}) => OpenResultErr({:?})", source, connection, set_id); @@ -1820,7 +1820,7 @@ impl NetworkBehaviour for Notifications { if !connections.iter().any(|(_, s)| matches!(s, ConnectionState::Opening | ConnectionState::Open(_))) { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + trace!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); self.peerset.dropped(set_id, source.clone(), sc_peerset::DropReason::Refused); *entry.into_mut() = PeerState::Disabled { @@ -1946,12 +1946,12 @@ impl NetworkBehaviour for Notifications { match peer_state { PeerState::Backoff { timer, .. } if *timer == delay_id => { - debug!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); + trace!(target: "sub-libp2p", "Libp2p <= Clean up ban of {:?} from the state", peer_id); self.peers.remove(&(peer_id, set_id)); } PeerState::PendingRequest { timer, .. 
} if *timer == delay_id => { - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + trace!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); // The `DialPeerCondition` ensures that dial attempts are de-duplicated self.events.push_back(NetworkBehaviourAction::DialPeer { peer_id, @@ -1967,7 +1967,7 @@ impl NetworkBehaviour for Notifications { if let Some((connec_id, connec_state)) = connections.iter_mut() .find(|(_, s)| matches!(s, ConnectionState::Closed)) { - debug!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)", + trace!(target: "sub-libp2p", "Handler({}, {:?}) <= Open({:?}) (ban expired)", peer_id, *connec_id, set_id); self.events.push_back(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), diff --git a/client/network/src/protocol/notifications/tests.rs b/client/network/src/protocol/notifications/tests.rs index f159a8e631782..8efe897afec3a 100644 --- a/client/network/src/protocol/notifications/tests.rs +++ b/client/network/src/protocol/notifications/tests.rs @@ -97,7 +97,7 @@ fn build_nodes() -> (Swarm, Swarm) { behaviour, keypairs[index].public().into_peer_id() ); - Swarm::listen_on(&mut swarm, addrs[index].clone()).unwrap(); + swarm.listen_on(addrs[index].clone()).unwrap(); out.push(swarm); } @@ -192,18 +192,26 @@ impl NetworkBehaviour for CustomProtoWithAddr { self.inner.inject_dial_failure(peer_id) } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_new_listen_addr(addr) + fn inject_new_listener(&mut self, id: ListenerId) { + self.inner.inject_new_listener(id) } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_expired_listen_addr(addr) + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.inner.inject_new_listen_addr(id, addr) + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + self.inner.inject_expired_listen_addr(id, addr) } fn 
inject_new_external_addr(&mut self, addr: &Multiaddr) { self.inner.inject_new_external_addr(addr) } + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_expired_external_addr(addr) + } + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { self.inner.inject_listener_error(id, err); } @@ -245,7 +253,7 @@ fn reconnect_after_disconnect() { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; if service2_state == ServiceState::FirstConnec { - service1.disconnect_peer( + service1.behaviour_mut().disconnect_peer( Swarm::local_peer_id(&service2), sc_peerset::SetId::from(0) ); @@ -267,7 +275,7 @@ fn reconnect_after_disconnect() { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; if service1_state == ServiceState::FirstConnec { - service1.disconnect_peer( + service1.behaviour_mut().disconnect_peer( Swarm::local_peer_id(&service2), sc_peerset::SetId::from(0) ); diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index bf193312c5966..83d4e109e1ea4 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -505,9 +505,10 @@ impl ChainSync { } } - /// Number of active sync requests. + /// Number of active forks requests. This includes + /// requests that are pending or could be issued right away. pub fn num_sync_requests(&self) -> usize { - self.fork_targets.len() + self.fork_targets.values().filter(|f| f.number <= self.best_queued_number).count() } /// Number of downloaded blocks. @@ -737,10 +738,19 @@ impl ChainSync { // If our best queued is more than `MAX_BLOCKS_TO_LOOK_BACKWARDS` blocks away from the // common number, the peer best number is higher than our best queued and the common // number is smaller than the last finalized block number, we should do an ancestor - // search to find a better common block. + // search to find a better common block. 
If the queue is full we wait till all blocks are + // imported though. if best_queued.saturating_sub(peer.common_number) > MAX_BLOCKS_TO_LOOK_BACKWARDS.into() && best_queued < peer.best_number && peer.common_number < last_finalized + && queue.len() <= MAJOR_SYNC_BLOCKS.into() { + trace!( + target: "sync", + "Peer {:?} common block {} too far behind of our best {}. Starting ancestry search.", + id, + peer.common_number, + best_queued, + ); let current = std::cmp::min(peer.best_number, best_queued); peer.state = PeerSyncState::AncestorSearch { current, @@ -803,7 +813,7 @@ impl ChainSync { response: BlockResponse ) -> Result, BadPeer> { self.downloaded_blocks += response.blocks.len(); - let mut new_blocks: Vec> = + let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { let mut blocks = response.blocks; if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) { @@ -823,8 +833,9 @@ impl ChainSync { .drain(self.best_queued_number + One::one()) .into_iter() .map(|block_data| { - let justifications = - legacy_justification_mapping(block_data.block.justification); + let justifications = block_data.block.justifications.or( + legacy_justification_mapping(block_data.block.justification) + ); IncomingBlock { hash: block_data.block.hash, header: block_data.block.header, @@ -844,11 +855,14 @@ impl ChainSync { } validate_blocks::(&blocks, who, Some(request))?; blocks.into_iter().map(|b| { + let justifications = b.justifications.or( + legacy_justification_mapping(b.justification) + ); IncomingBlock { hash: b.hash, header: b.header, body: b.body, - justifications: legacy_justification_mapping(b.justification), + justifications, origin: Some(who.clone()), allow_missing_state: true, import_existing: false, @@ -953,11 +967,14 @@ impl ChainSync { // When request.is_none() this is a block announcement. Just accept blocks. 
validate_blocks::(&blocks, who, None)?; blocks.into_iter().map(|b| { + let justifications = b.justifications.or( + legacy_justification_mapping(b.justification) + ); IncomingBlock { hash: b.hash, header: b.header, body: b.body, - justifications: legacy_justification_mapping(b.justification), + justifications, origin: Some(who.clone()), allow_missing_state: true, import_existing: false, @@ -969,6 +986,13 @@ impl ChainSync { return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)); }; + Ok(self.validate_and_queue_blocks(new_blocks)) + } + + fn validate_and_queue_blocks( + &mut self, + mut new_blocks: Vec>, + ) -> OnBlockData { let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); if new_blocks.len() != orig_len { @@ -991,10 +1015,8 @@ impl ChainSync { ); self.on_block_queued(h, n) } - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - - Ok(OnBlockData::Import(origin, new_blocks)) + OnBlockData::Import(origin, new_blocks) } /// Handle a response from the remote to a justification request that we made. @@ -1028,7 +1050,7 @@ impl ChainSync { return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); } - block.justification + block.justifications.or(legacy_justification_mapping(block.justification)) } else { // we might have asked the peer for a justification on a block that we assumed it // had but didn't (regardless of whether it had a justification for it or not). 
@@ -1043,7 +1065,7 @@ impl ChainSync { if let Some((peer, hash, number, j)) = self .extra_justifications - .on_response(who, legacy_justification_mapping(justification)) + .on_response(who, justification) { return Ok(OnBlockJustification::Import { peer, hash, number, justifications: j }) } @@ -1356,7 +1378,7 @@ impl ChainSync { PreValidateBlockAnnounce::Failure { who, disconnect } } Err(e) => { - error!( + debug!( target: "sync", "💔 Block announcement validation of block {:?} errored: {}", hash, @@ -1425,23 +1447,36 @@ impl ChainSync { &mut self, pre_validation_result: PreValidateBlockAnnounce, ) -> PollBlockAnnounceValidation { - trace!( - target: "sync", - "Finished block announce validation: {:?}", - pre_validation_result, - ); - let (announce, is_best, who) = match pre_validation_result { PreValidateBlockAnnounce::Failure { who, disconnect } => { + debug!( + target: "sync", + "Failed announce validation: {:?}, disconnect: {}", + who, + disconnect, + ); return PollBlockAnnounceValidation::Failure { who, disconnect } }, PreValidateBlockAnnounce::Process { announce, is_new_best, who } => { (announce, is_new_best, who) }, - PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => - return PollBlockAnnounceValidation::Skip, + PreValidateBlockAnnounce::Error { .. } | PreValidateBlockAnnounce::Skip => { + debug!( + target: "sync", + "Ignored announce validation", + ); + return PollBlockAnnounceValidation::Skip + }, }; + trace!( + target: "sync", + "Finished block announce validation: from {:?}: {:?}. 
local_best={}", + who, + announce.summary(), + is_best, + ); + let number = *announce.header.number(); let hash = announce.header.hash(); let parent_status = self.block_status(announce.header.parent_hash()).unwrap_or(BlockStatus::Unknown); @@ -1512,34 +1547,54 @@ impl ChainSync { return PollBlockAnnounceValidation::ImportHeader { is_best, announce, who } } - if number <= self.best_queued_number { - trace!( - target: "sync", - "Added sync target for block announced from {}: {} {:?}", - who, - hash, - announce.header, - ); - self.fork_targets - .entry(hash.clone()) - .or_insert_with(|| ForkTarget { - number, - parent_hash: Some(*announce.header.parent_hash()), - peers: Default::default(), - }) - .peers.insert(who.clone()); - } + trace!( + target: "sync", + "Added sync target for block announced from {}: {} {:?}", + who, + hash, + announce.summary(), + ); + self.fork_targets + .entry(hash.clone()) + .or_insert_with(|| ForkTarget { + number, + parent_hash: Some(*announce.header.parent_hash()), + peers: Default::default(), + }) + .peers.insert(who.clone()); - trace!(target: "sync", "Announce validation result is nothing"); PollBlockAnnounceValidation::Nothing { is_best, who, announce } } /// Call when a peer has disconnected. - pub fn peer_disconnected(&mut self, who: &PeerId) { + /// Canceled obsolete block request may result in some blocks being ready for + /// import, so this functions checks for such blocks and returns them. 
+ pub fn peer_disconnected(&mut self, who: &PeerId) -> Option> { self.blocks.clear_peer_download(who); self.peers.remove(who); self.extra_justifications.peer_disconnected(who); self.pending_requests.set_all(); + let blocks: Vec<_> = self.blocks + .drain(self.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = + legacy_justification_mapping(block_data.block.justification); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: false, + } + }).collect(); + if !blocks.is_empty() { + Some(self.validate_and_queue_blocks(blocks)) + } else { + None + } } /// Restart the sync process. This will reset all pending block requests and return an iterator @@ -1556,11 +1611,13 @@ impl ChainSync { debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); let old_peers = std::mem::take(&mut self.peers); - old_peers.into_iter().filter_map(move |(id, p)| { + old_peers.into_iter().filter_map(move |(id, mut p)| { // peers that were downloading justifications // should be kept in that state. match p.state { PeerSyncState::DownloadingJustification(_) => { + // We make sure our commmon number is at least something we have. + p.common_number = info.best_number; self.peers.insert(id, p); return None; } @@ -1609,7 +1666,7 @@ impl ChainSync { // This is purely during a backwards compatible transitionary period and should be removed // once we can assume all nodes can send and receive multiple Justifications // The ID tag is hardcoded here to avoid depending on the GRANDPA crate. 
-// TODO: https://github.com/paritytech/substrate/issues/8172 +// See: https://github.com/paritytech/substrate/issues/8172 fn legacy_justification_mapping(justification: Option) -> Option { justification.map(|just| (*b"FRNK", just).into()) } @@ -2020,7 +2077,7 @@ mod test { let mut new_blocks = |n| { for _ in 0..n { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); } let info = client.info(); @@ -2068,6 +2125,14 @@ mod test { sync.peers.get(&peer_id3).unwrap().state, PeerSyncState::DownloadingJustification(b1_hash), ); + + // Set common block to something that we don't have (e.g. failed import) + sync.peers.get_mut(&peer_id3).unwrap().common_number = 100; + let _ = sync.restart().count(); + assert_eq!( + sync.peers.get(&peer_id3).unwrap().common_number, + 50 + ); } /// Send a block annoucnement for the given `header`. @@ -2109,6 +2174,7 @@ mod test { receipt: None, message_queue: None, justification: None, + justifications: None, } ).collect(), } @@ -2151,7 +2217,7 @@ mod test { let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); block } @@ -2192,7 +2258,7 @@ mod test { let block = block_builder.build().unwrap().block; if import { - client2.import(BlockOrigin::Own, block.clone()).unwrap(); + block_on(client2.import(BlockOrigin::Own, block.clone())).unwrap(); } block @@ -2217,7 +2283,7 @@ mod test { send_block_announce(block3_fork.header().clone(), &peer_id2, &mut sync); // Import and tell sync that we now have the fork. 
- client.import(BlockOrigin::Own, block3_fork.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, block3_fork.clone())).unwrap(); sync.update_chain_info(&block3_fork.hash(), 3); let block4 = build_block_at(block3_fork.hash(), false); @@ -2329,9 +2395,12 @@ mod test { resp_blocks.into_iter() .rev() - .for_each(|b| client.import_as_final(BlockOrigin::Own, b).unwrap()); + .for_each(|b| block_on(client.import_as_final(BlockOrigin::Own, b)).unwrap()); } + // "Wait" for the queue to clear + sync.queue_blocks.clear(); + // Let peer2 announce that it finished syncing send_block_announce(best_block.header().clone(), &peer_id2, &mut sync); @@ -2392,7 +2461,7 @@ mod test { let mut client = Arc::new(TestClientBuilder::new().build()); let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] .into_iter() - .inspect(|b| client.import(BlockOrigin::Own, (*b).clone()).unwrap()) + .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) .cloned() .collect::>(); @@ -2496,7 +2565,7 @@ mod test { resp_blocks.into_iter() .rev() - .for_each(|b| client.import(BlockOrigin::Own, b).unwrap()); + .for_each(|b| block_on(client.import(BlockOrigin::Own, b)).unwrap()); } // Request the tip diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index 60492f24ed8c3..81f9cffacaab4 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -228,6 +228,7 @@ mod test { message_queue: None, receipt: None, justification: None, + justifications: None, }).collect() } diff --git a/client/network/src/request_responses.rs b/client/network/src/request_responses.rs index e8ca2795ea79d..3762cf70e71d4 100644 --- a/client/network/src/request_responses.rs +++ b/client/network/src/request_responses.rs @@ -133,9 +133,20 @@ pub struct OutgoingResponse { /// /// `Err(())` if none is available e.g. due an error while handling the request. 
pub result: Result, ()>, + /// Reputation changes accrued while handling the request. To be applied to the reputation of /// the peer sending the request. pub reputation_changes: Vec, + + /// If provided, the `oneshot::Sender` will be notified when the request has been sent to the + /// peer. + /// + /// > **Note**: Operating systems typically maintain a buffer of a few dozen kilobytes of + /// > outgoing data for each TCP socket, and it is not possible for a user + /// > application to inspect this buffer. This channel here is not actually notified + /// > when the response has been fully sent out, but rather when it has fully been + /// > written to the buffer managed by the operating system. + pub sent_feedback: Option>, } /// Event generated by the [`RequestResponsesBehaviour`]. @@ -240,6 +251,10 @@ pub struct RequestResponsesBehaviour { /// Whenever an incoming request arrives, the arrival [`Instant`] is recorded here. pending_responses_arrival_time: HashMap, + + /// Whenever a response is received on `pending_responses`, insert a channel to be notified + /// when the request has been sent out. + send_feedback: HashMap>, } /// Generated by the response builder and waiting to be processed. 
@@ -284,6 +299,7 @@ impl RequestResponsesBehaviour { pending_requests: Default::default(), pending_responses: Default::default(), pending_responses_arrival_time: Default::default(), + send_feedback: Default::default(), }) } @@ -412,9 +428,15 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_expired_external_addr(&mut self, addr: &Multiaddr) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_expired_listen_addr(p, addr) + NetworkBehaviour::inject_expired_external_addr(p, addr) + } + } + + fn inject_expired_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(p, id, addr) } } @@ -424,9 +446,15 @@ impl NetworkBehaviour for RequestResponsesBehaviour { } } - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + fn inject_new_listener(&mut self, id: ListenerId) { + for (p, _) in self.protocols.values_mut() { + NetworkBehaviour::inject_new_listener(p, id) + } + } + + fn inject_new_listen_addr(&mut self, id: ListenerId, addr: &Multiaddr) { for (p, _) in self.protocols.values_mut() { - NetworkBehaviour::inject_new_listen_addr(p, addr) + NetworkBehaviour::inject_new_listen_addr(p, id, addr) } } @@ -463,6 +491,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { response: OutgoingResponse { result, reputation_changes, + sent_feedback, }, } = match outcome { Some(outcome) => outcome, @@ -483,6 +512,13 @@ impl NetworkBehaviour for RequestResponsesBehaviour { Dropping response", request_id, protocol_name, ); + } else { + if let Some(sent_feedback) = sent_feedback { + self.send_feedback.insert( + (protocol_name, request_id).into(), + sent_feedback + ); + } } } } @@ -668,6 +704,7 @@ impl NetworkBehaviour for RequestResponsesBehaviour { self.pending_responses_arrival_time.remove( &(protocol.clone(), request_id).into(), ); + 
self.send_feedback.remove(&(protocol.clone(), request_id).into()); let out = Event::InboundRequest { peer, protocol: protocol.clone(), @@ -690,11 +727,18 @@ impl NetworkBehaviour for RequestResponsesBehaviour { failed; qed.", ); + if let Some(send_feedback) = self.send_feedback.remove( + &(protocol.clone(), request_id).into() + ) { + let _ = send_feedback.send(()); + } + let out = Event::InboundRequest { peer, protocol: protocol.clone(), result: Ok(arrival_time), }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(out)); } @@ -898,7 +942,7 @@ mod tests { let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + swarm.listen_on(listen_addr.clone()).unwrap(); (swarm, listen_addr) } @@ -914,11 +958,14 @@ mod tests { pool.spawner().spawn_obj(async move { while let Some(rq) = rx.next().await { + let (fb_tx, fb_rx) = oneshot::channel(); assert_eq!(rq.payload, b"this is a request"); let _ = rq.pending_response.send(super::OutgoingResponse { result: Ok(b"this is a response".to_vec()), reputation_changes: Vec::new(), + sent_feedback: Some(fb_tx), }); + fb_rx.await.unwrap(); } }.boxed().into()).unwrap(); @@ -965,7 +1012,7 @@ mod tests { match swarm.next_event().await { SwarmEvent::ConnectionEstablished { peer_id, .. } => { let (sender, receiver) = oneshot::channel(); - swarm.send_request( + swarm.behaviour_mut().send_request( &peer_id, protocol_name, b"this is a request".to_vec(), @@ -1005,6 +1052,7 @@ mod tests { let _ = rq.pending_response.send(super::OutgoingResponse { result: Ok(b"this response exceeds the limit".to_vec()), reputation_changes: Vec::new(), + sent_feedback: None, }); } }.boxed().into()).unwrap(); @@ -1054,7 +1102,7 @@ mod tests { match swarm.next_event().await { SwarmEvent::ConnectionEstablished { peer_id, .. 
} => { let (sender, receiver) = oneshot::channel(); - swarm.send_request( + swarm.behaviour_mut().send_request( &peer_id, protocol_name, b"this is a request".to_vec(), @@ -1146,7 +1194,7 @@ mod tests { // Ask swarm 1 to dial swarm 2. There isn't any discovery mechanism in place in this test, // so they wouldn't connect to each other. - Swarm::dial_addr(&mut swarm_1, listen_add_2).unwrap(); + swarm_1.dial_addr(listen_add_2).unwrap(); // Run swarm 2 in the background, receiving two requests. pool.spawner().spawn_obj( @@ -1175,6 +1223,7 @@ mod tests { .send(OutgoingResponse { result: Ok(b"this is a response".to_vec()), reputation_changes: Vec::new(), + sent_feedback: None, }) .unwrap(); protocol_2_request.unwrap() @@ -1182,6 +1231,7 @@ mod tests { .send(OutgoingResponse { result: Ok(b"this is a response".to_vec()), reputation_changes: Vec::new(), + sent_feedback: None, }) .unwrap(); }.boxed().into()).unwrap(); @@ -1197,14 +1247,14 @@ mod tests { SwarmEvent::ConnectionEstablished { peer_id, .. } => { let (sender_1, receiver_1) = oneshot::channel(); let (sender_2, receiver_2) = oneshot::channel(); - swarm_1.send_request( + swarm_1.behaviour_mut().send_request( &peer_id, protocol_name_1, b"this is a request".to_vec(), sender_1, IfDisconnected::ImmediateError, ); - swarm_1.send_request( + swarm_1.behaviour_mut().send_request( &peer_id, protocol_name_2, b"this is a request".to_vec(), diff --git a/client/network/src/schema/api.v1.proto b/client/network/src/schema/api.v1.proto index a933c5811c109..23d585b05e9cd 100644 --- a/client/network/src/schema/api.v1.proto +++ b/client/network/src/schema/api.v1.proto @@ -29,6 +29,10 @@ message BlockRequest { Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. uint32 max_blocks = 6; // optional + // Indicate to the receiver that we support multiple justifications. 
If the responder also + // supports this it will populate the multiple justifications field in `BlockData` instead of + // the single justification field. + bool support_multiple_justifications = 7; // optional } // Response to `BlockRequest` @@ -56,5 +60,11 @@ message BlockData { // doesn't make in possible to differentiate between a lack of justification and an empty // justification. bool is_empty_justification = 7; // optional, false if absent + // Justifications if requested. + // Unlike the field for a single justification, this field does not require an associated + // boolean to differentiate between the lack of justifications and empty justification(s). This + // is because empty justifications, like all justifications, are paired with a non-empty + // consensus engine ID. + bytes justifications = 8; // optional } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 54a5559d2eaf9..4ad5053d9b287 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -79,7 +79,7 @@ use libp2p::swarm::{ SwarmEvent, protocols_handler::NodeHandlerWrapperError }; -use log::{error, info, trace, warn}; +use log::{error, info, trace, debug, warn}; use metrics::{Metrics, MetricSources, Histogram, HistogramVec}; use parking_lot::Mutex; use sc_peerset::PeersetHandle; @@ -465,47 +465,47 @@ impl NetworkWorker { /// Returns the number of peers we're connected to. pub fn num_connected_peers(&self) -> usize { - self.network_service.user_protocol().num_connected_peers() + self.network_service.behaviour().user_protocol().num_connected_peers() } /// Returns the number of peers we're connected to and that are being queried. pub fn num_active_peers(&self) -> usize { - self.network_service.user_protocol().num_active_peers() + self.network_service.behaviour().user_protocol().num_active_peers() } /// Current global sync state.
pub fn sync_state(&self) -> SyncState { - self.network_service.user_protocol().sync_state() + self.network_service.behaviour().user_protocol().sync_state() } /// Target sync block number. pub fn best_seen_block(&self) -> Option> { - self.network_service.user_protocol().best_seen_block() + self.network_service.behaviour().user_protocol().best_seen_block() } /// Number of peers participating in syncing. pub fn num_sync_peers(&self) -> u32 { - self.network_service.user_protocol().num_sync_peers() + self.network_service.behaviour().user_protocol().num_sync_peers() } /// Number of blocks in the import queue. pub fn num_queued_blocks(&self) -> u32 { - self.network_service.user_protocol().num_queued_blocks() + self.network_service.behaviour().user_protocol().num_queued_blocks() } /// Returns the number of downloaded blocks. pub fn num_downloaded_blocks(&self) -> usize { - self.network_service.user_protocol().num_downloaded_blocks() + self.network_service.behaviour().user_protocol().num_downloaded_blocks() } /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { - self.network_service.user_protocol().num_sync_requests() + self.network_service.behaviour().user_protocol().num_sync_requests() } /// Adds an address for a node. pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - self.network_service.add_known_address(peer_id, addr); + self.network_service.behaviour_mut().add_known_address(peer_id, addr); } /// Return a `NetworkService` that can be shared through the code base and can be used to @@ -516,12 +516,12 @@ impl NetworkWorker { /// You must call this when a new block is finalized by the client. pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { - self.network_service.user_protocol_mut().on_block_finalized(hash, &header); + self.network_service.behaviour_mut().user_protocol_mut().on_block_finalized(hash, &header); } /// Inform the network service about new best imported block. 
pub fn new_best_block_imported(&mut self, hash: B::Hash, number: NumberFor) { - self.network_service.user_protocol_mut().new_best_block_imported(hash, number); + self.network_service.behaviour_mut().user_protocol_mut().new_best_block_imported(hash, number); } /// Returns the local `PeerId`. @@ -542,15 +542,15 @@ impl NetworkWorker { /// everywhere about this. Please don't use this function to retrieve actual information. pub fn network_state(&mut self) -> NetworkState { let swarm = &mut self.network_service; - let open = swarm.user_protocol().open_peers().cloned().collect::>(); + let open = swarm.behaviour_mut().user_protocol().open_peers().cloned().collect::>(); let connected_peers = { let swarm = &mut *swarm; open.iter().filter_map(move |peer_id| { - let known_addresses = NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id) + let known_addresses = NetworkBehaviour::addresses_of_peer(swarm.behaviour_mut(), peer_id) .into_iter().collect(); - let endpoint = if let Some(e) = swarm.node(peer_id).map(|i| i.endpoint()) { + let endpoint = if let Some(e) = swarm.behaviour_mut().node(peer_id).map(|i| i.endpoint()).flatten() { e.clone().into() } else { error!(target: "sub-libp2p", "Found state inconsistency between custom protocol \ @@ -560,9 +560,9 @@ impl NetworkWorker { Some((peer_id.to_base58(), NetworkStatePeer { endpoint, - version_string: swarm.node(peer_id) + version_string: swarm.behaviour_mut().node(peer_id) .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.node(peer_id).and_then(|i| i.latest_ping()), + latest_ping_time: swarm.behaviour_mut().node(peer_id).and_then(|i| i.latest_ping()), known_addresses, })) }).collect() @@ -570,14 +570,14 @@ impl NetworkWorker { let not_connected_peers = { let swarm = &mut *swarm; - swarm.known_peers().into_iter() + swarm.behaviour_mut().known_peers().into_iter() .filter(|p| open.iter().all(|n| n != p)) .map(move |peer_id| { (peer_id.to_base58(), NetworkStateNotConnectedPeer { - 
version_string: swarm.node(&peer_id) + version_string: swarm.behaviour_mut().node(&peer_id) .and_then(|i| i.client_version().map(|s| s.to_owned())), - latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), - known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) + latest_ping_time: swarm.behaviour_mut().node(&peer_id).and_then(|i| i.latest_ping()), + known_addresses: NetworkBehaviour::addresses_of_peer(swarm.behaviour_mut(), &peer_id) .into_iter().collect(), }) }) @@ -585,8 +585,8 @@ impl NetworkWorker { }; let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); - let listened_addresses = Swarm::::listeners(&swarm).cloned().collect(); - let external_addresses = Swarm::::external_addresses(&swarm) + let listened_addresses = swarm.listeners().cloned().collect(); + let external_addresses = swarm.external_addresses() .map(|r| &r.addr) .cloned() .collect(); @@ -597,13 +597,13 @@ impl NetworkWorker { external_addresses, connected_peers, not_connected_peers, - peerset: swarm.user_protocol_mut().peerset_debug_info(), + peerset: swarm.behaviour_mut().user_protocol_mut().peerset_debug_info(), } } /// Get currently connected peers. pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { - self.network_service.user_protocol_mut() + self.network_service.behaviour_mut().user_protocol_mut() .peers_info() .map(|(id, info)| (id.clone(), info.clone())) .collect() @@ -647,6 +647,13 @@ impl NetworkService { .unbounded_send(ServiceToWorkerMsg::SetReservedOnly(reserved_only)); } + /// Adds an address known to a node. + pub fn add_known_address(&self, peer_id: PeerId, addr: Multiaddr) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } + /// Appends a notification to the buffer of pending outgoing notifications with the given peer. /// Has no effect if the notifications channel with this protocol name is not open. 
/// @@ -1347,7 +1354,7 @@ impl Future for NetworkWorker { // Check for new incoming light client requests. if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - let result = this.network_service.light_client_request(rq); + let result = this.network_service.behaviour_mut().light_client_request(rq); match result { Ok(()) => {}, Err(light_client_requests::sender::SendRequestError::TooManyRequests) => { @@ -1386,46 +1393,46 @@ impl Future for NetworkWorker { match msg { ServiceToWorkerMsg::AnnounceBlock(hash, data) => - this.network_service.user_protocol_mut().announce_block(hash, data), + this.network_service.behaviour_mut().user_protocol_mut().announce_block(hash, data), ServiceToWorkerMsg::RequestJustification(hash, number) => - this.network_service.user_protocol_mut().request_justification(&hash, number), + this.network_service.behaviour_mut().user_protocol_mut().request_justification(&hash, number), ServiceToWorkerMsg::PropagateTransaction(hash) => this.tx_handler_controller.propagate_transaction(hash), ServiceToWorkerMsg::PropagateTransactions => this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => - this.network_service.get_value(&key), + this.network_service.behaviour_mut().get_value(&key), ServiceToWorkerMsg::PutValue(key, value) => - this.network_service.put_value(key, value), + this.network_service.behaviour_mut().put_value(key, value), ServiceToWorkerMsg::SetReservedOnly(reserved_only) => - this.network_service.user_protocol_mut().set_reserved_only(reserved_only), + this.network_service.behaviour_mut().user_protocol_mut().set_reserved_only(reserved_only), ServiceToWorkerMsg::SetReserved(peers) => - this.network_service.user_protocol_mut().set_reserved_peers(peers), + this.network_service.behaviour_mut().user_protocol_mut().set_reserved_peers(peers), ServiceToWorkerMsg::AddReserved(peer_id) => - 
this.network_service.user_protocol_mut().add_reserved_peer(peer_id), + this.network_service.behaviour_mut().user_protocol_mut().add_reserved_peer(peer_id), ServiceToWorkerMsg::RemoveReserved(peer_id) => - this.network_service.user_protocol_mut().remove_reserved_peer(peer_id), + this.network_service.behaviour_mut().user_protocol_mut().remove_reserved_peer(peer_id), ServiceToWorkerMsg::AddSetReserved(protocol, peer_id) => - this.network_service.user_protocol_mut().add_set_reserved_peer(protocol, peer_id), + this.network_service.behaviour_mut().user_protocol_mut().add_set_reserved_peer(protocol, peer_id), ServiceToWorkerMsg::RemoveSetReserved(protocol, peer_id) => - this.network_service.user_protocol_mut().remove_set_reserved_peer(protocol, peer_id), + this.network_service.behaviour_mut().user_protocol_mut().remove_set_reserved_peer(protocol, peer_id), ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => - this.network_service.add_known_address(peer_id, addr), + this.network_service.behaviour_mut().add_known_address(peer_id, addr), ServiceToWorkerMsg::AddToPeersSet(protocol, peer_id) => - this.network_service.user_protocol_mut().add_to_peers_set(protocol, peer_id), + this.network_service.behaviour_mut().user_protocol_mut().add_to_peers_set(protocol, peer_id), ServiceToWorkerMsg::RemoveFromPeersSet(protocol, peer_id) => - this.network_service.user_protocol_mut().remove_from_peers_set(protocol, peer_id), + this.network_service.behaviour_mut().user_protocol_mut().remove_from_peers_set(protocol, peer_id), ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => - this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), + this.network_service.behaviour_mut().user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), ServiceToWorkerMsg::Request { target, protocol, request, pending_response, connect } => { - this.network_service.send_request(&target, 
&protocol, request, pending_response, connect); + this.network_service.behaviour_mut().send_request(&target, &protocol, request, pending_response, connect); }, ServiceToWorkerMsg::DisconnectPeer(who, protocol_name) => - this.network_service.user_protocol_mut().disconnect_peer(&who, &protocol_name), + this.network_service.behaviour_mut().user_protocol_mut().disconnect_peer(&who, &protocol_name), ServiceToWorkerMsg::NewBestBlockImported(hash, number) => - this.network_service.user_protocol_mut().new_best_block_imported(hash, number), + this.network_service.behaviour_mut().user_protocol_mut().new_best_block_imported(hash, number), } } @@ -1470,7 +1477,11 @@ impl Future for NetworkWorker { let reason = match err { ResponseFailure::Network(InboundFailure::Timeout) => "timeout", ResponseFailure::Network(InboundFailure::UnsupportedProtocols) => - "unsupported", + // `UnsupportedProtocols` is reported for every single + // inbound request whenever a request with an unsupported + // protocol is received. This is not reported in order to + // avoid confusion.
+ continue, ResponseFailure::Network(InboundFailure::ResponseOmission) => "busy-omitted", ResponseFailure::Network(InboundFailure::ConnectionClosed) => @@ -1623,7 +1634,7 @@ impl Future for NetworkWorker { this.event_streams.send(Event::Dht(event)); }, Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, num_established }) => { - trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + debug!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); if let Some(metrics) = this.metrics.as_ref() { let direction = match endpoint { @@ -1638,7 +1649,7 @@ impl Future for NetworkWorker { } }, Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, num_established }) => { - trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); if let Some(metrics) = this.metrics.as_ref() { let direction = match endpoint { ConnectedPoint::Dialer { .. } => "out", @@ -1715,7 +1726,7 @@ impl Future for NetworkWorker { } }, Poll::Ready(SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error }) => { - trace!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", + debug!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", local_addr, send_back_addr, error); if let Some(metrics) = this.metrics.as_ref() { let reason = match error { @@ -1729,7 +1740,7 @@ impl Future for NetworkWorker { } }, Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { - trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). Connected via {:?}.", + debug!(target: "sub-libp2p", "Libp2p => BannedPeer({}). 
Connected via {:?}.", peer_id, endpoint); if let Some(metrics) = this.metrics.as_ref() { metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc(); @@ -1758,7 +1769,7 @@ impl Future for NetworkWorker { } }, Poll::Ready(SwarmEvent::ListenerError { error }) => { - trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error); + debug!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error); if let Some(metrics) = this.metrics.as_ref() { metrics.listeners_errors_total.inc(); } @@ -1766,7 +1777,7 @@ impl Future for NetworkWorker { }; } - let num_connected_peers = this.network_service.user_protocol_mut().num_connected_peers(); + let num_connected_peers = this.network_service.behaviour_mut().user_protocol_mut().num_connected_peers(); // Update the variables shared with the `NetworkService`. this.num_connected.store(num_connected_peers, Ordering::Relaxed); @@ -1778,7 +1789,7 @@ impl Future for NetworkWorker { *this.external_addresses.lock() = external_addresses; } - let is_major_syncing = match this.network_service.user_protocol_mut().sync_state() { + let is_major_syncing = match this.network_service.behaviour_mut().user_protocol_mut().sync_state() { SyncState::Idle => false, SyncState::Downloading => true, }; @@ -1788,21 +1799,25 @@ impl Future for NetworkWorker { this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { - for (proto, buckets) in this.network_service.num_entries_per_kbucket() { + for (proto, buckets) in this.network_service.behaviour_mut().num_entries_per_kbucket() { for (lower_ilog2_bucket_bound, num_entries) in buckets { metrics.kbuckets_num_nodes .with_label_values(&[&proto.as_ref(), &lower_ilog2_bucket_bound.to_string()]) .set(num_entries as u64); } } - for (proto, num_entries) in this.network_service.num_kademlia_records() { + for (proto, num_entries) in this.network_service.behaviour_mut().num_kademlia_records() { 
metrics.kademlia_records_count.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); } - for (proto, num_entries) in this.network_service.kademlia_records_total_size() { + for (proto, num_entries) in this.network_service.behaviour_mut().kademlia_records_total_size() { metrics.kademlia_records_sizes_total.with_label_values(&[&proto.as_ref()]).set(num_entries as u64); } - metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); - metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); + metrics.peerset_num_discovered.set( + this.network_service.behaviour_mut().user_protocol().num_discovered_peers() as u64 + ); + metrics.peerset_num_requested.set( + this.network_service.behaviour_mut().user_protocol().requested_peers().count() as u64 + ); metrics.pending_connections.set( Swarm::network_info(&this.network_service).connection_counters().num_pending() as u64 ); @@ -1830,13 +1845,13 @@ impl<'a, B: BlockT> Link for NetworkLink<'a, B> { count: usize, results: Vec<(Result>, BlockImportError>, B::Hash)> ) { - self.protocol.user_protocol_mut().on_blocks_processed(imported, count, results) + self.protocol.behaviour_mut().user_protocol_mut().on_blocks_processed(imported, count, results) } fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool) { - self.protocol.user_protocol_mut().justification_import_result(who, hash.clone(), number, success); + self.protocol.behaviour_mut().user_protocol_mut().justification_import_result(who, hash.clone(), number, success); } fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol.user_protocol_mut().request_justification(hash, number) + self.protocol.behaviour_mut().user_protocol_mut().request_justification(hash, number) } } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index fd8cf4c3d105f..dd4a0597cbcbc 100644 --- 
a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -47,8 +47,10 @@ fn build_test_full_node(config: config::NetworkConfiguration) #[derive(Clone)] struct PassThroughVerifier(bool); + + #[async_trait::async_trait] impl sp_consensus::import_queue::Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, origin: sp_consensus::BlockOrigin, header: B::Header, diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 20ac8314b7477..b694182e6a231 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -360,7 +360,7 @@ impl TransactionsHandler { ) { // sending transaction to light node is considered a bad behavior if matches!(self.local_role, config::Role::Light) { - trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); + debug!(target: "sync", "Peer {} is trying to send transactions to the light node", who); self.service.disconnect_peer(who, self.protocol_name.clone()); self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); return; diff --git a/client/network/src/utils.rs b/client/network/src/utils.rs index 02673ef49fb4c..b2ae03777e651 100644 --- a/client/network/src/utils.rs +++ b/client/network/src/utils.rs @@ -59,6 +59,11 @@ impl LruHashSet { } false } + + /// Removes an element from the set if it is present. 
+ pub fn remove(&mut self, e: &T) -> bool { + self.set.remove(e) + } } #[cfg(test)] diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 7ba468fa3f78f..18a8d5cf8ca0a 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -20,7 +20,7 @@ parking_lot = "0.11.1" futures = "0.3.9" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.36.0", default-features = false } +libp2p = { version = "0.37.1", default-features = false } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.9.0", path = "../../consensus/common" } sc-client-api = { version = "3.0.0", path = "../../api" } @@ -34,3 +34,4 @@ substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtim tempfile = "3.1.0" sp-tracing = { version = "3.0.0", path = "../../../primitives/tracing" } sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../service" } +async-trait = "0.1.42" diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 200c7357c4244..b3641d4b41214 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -26,12 +26,13 @@ use substrate_test_runtime_client::{self, prelude::*}; use substrate_test_runtime_client::runtime::{Block, Hash}; use sp_runtime::generic::BlockId; use sc_block_builder::BlockBuilderProvider; +use futures::executor::block_on; use super::*; fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { let mut client = substrate_test_runtime_client::new(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::File, block).unwrap(); + block_on(client.import(BlockOrigin::File, block)).unwrap(); let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); let header = client.header(&BlockId::Number(1)).unwrap(); @@ -55,12 +56,12 @@ fn 
import_single_good_block_works() { let mut expected_aux = ImportedAux::default(); expected_aux.is_new_best = true; - match import_single_block( + match block_on(import_single_block( &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier::new(true) - ) { + )) { Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} r @ _ => panic!("{:?}", r) @@ -70,12 +71,12 @@ fn import_single_good_block_works() { #[test] fn import_single_good_known_block_is_ignored() { let (mut client, _hash, number, _, block) = prepare_good_block(); - match import_single_block( + match block_on(import_single_block( &mut client, BlockOrigin::File, block, &mut PassThroughVerifier::new(true) - ) { + )) { Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {} _ => panic!() } @@ -85,12 +86,12 @@ fn import_single_good_known_block_is_ignored() { fn import_single_good_block_without_header_fails() { let (_, _, _, peer_id, mut block) = prepare_good_block(); block.header = None; - match import_single_block( + match block_on(import_single_block( &mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier::new(true) - ) { + )) { Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} _ => panic!() } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 32a6e07eab428..689eca8aac5dd 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -23,8 +23,7 @@ mod block_import; mod sync; use std::{ - borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, - task::{Poll, Context as FutureContext} + borrow::Cow, collections::HashMap, pin::Pin, sync::Arc, task::{Poll, Context as FutureContext} }; use libp2p::build_multiaddr; @@ -64,7 +63,7 @@ use sc_network::config::ProtocolConfig; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use 
sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_runtime::{Justification, Justifications}; -use substrate_test_runtime_client::{self, AccountKeyring}; +use substrate_test_runtime_client::AccountKeyring; use sc_service::client::Client; pub use sc_network::config::EmptyTransactionPool; pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; @@ -104,8 +103,9 @@ impl PassThroughVerifier { } /// This `Verifier` accepts all data as valid. +#[async_trait::async_trait] impl Verifier for PassThroughVerifier { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, @@ -154,13 +154,8 @@ impl PeersClient { } } - pub fn as_block_import(&self) -> BlockImportAdapter { - match *self { - PeersClient::Full(ref client, ref _backend) => - BlockImportAdapter::new_full(client.clone()), - PeersClient::Light(ref client, ref _backend) => - BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData), - } + pub fn as_block_import(&self) -> BlockImportAdapter { + BlockImportAdapter::new(self.clone()) } pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { @@ -218,7 +213,36 @@ impl PeersClient { } } -pub struct Peer { +#[async_trait::async_trait] +impl BlockImport for PeersClient { + type Error = ConsensusError; + type Transaction = (); + + async fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + match self { + PeersClient::Full(client, _) => client.check_block(block).await, + PeersClient::Light(client, _) => client.check_block(block).await, + } + } + + async fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + match self { + PeersClient::Full(client, _) => + client.import_block(block.convert_transaction(), cache).await, + PeersClient::Light(client, _) => + client.import_block(block.convert_transaction(), cache).await, + } + } +} + +pub struct Peer { pub data: D, client: PeersClient, /// We keep a copy of the verifier so that we can 
invoke it for locally-generated blocks, @@ -226,7 +250,7 @@ pub struct Peer { verifier: VerifierAdapter, /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, /// instead of going through the import queue. - block_import: BlockImportAdapter<()>, + block_import: BlockImportAdapter, select_chain: Option>, backend: Option>, network: NetworkWorker::Hash>, @@ -235,7 +259,10 @@ pub struct Peer { listen_addr: Multiaddr, } -impl Peer { +impl Peer where + B: BlockImport + Send + Sync, + B::Transaction: Send, +{ /// Get this peer ID. pub fn id(&self) -> PeerId { self.network.service().local_peer_id().clone() @@ -277,13 +304,24 @@ impl Peer { } /// Request explicit fork sync. - pub fn set_sync_fork_request(&self, peers: Vec, hash: ::Hash, number: NumberFor) { + pub fn set_sync_fork_request( + &self, + peers: Vec, + hash: ::Hash, + number: NumberFor, + ) { self.network.service().set_sync_fork_request(peers, hash, number); } /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 - where F: FnMut(BlockBuilder) -> Block + pub fn generate_blocks( + &mut self, + count: usize, + origin: BlockOrigin, + edit_block: F, + ) -> H256 + where + F: FnMut(BlockBuilder) -> Block { let best_hash = self.client.info().best_hash; self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false, true, true) @@ -320,19 +358,21 @@ impl Peer { block.header.parent_hash, ); let header = block.header.clone(); - let (import_block, cache) = self.verifier.verify( + let (import_block, cache) = futures::executor::block_on(self.verifier.verify( origin, header.clone(), None, if headers_only { None } else { Some(block.extrinsics) }, - ).unwrap(); + )).unwrap(); let cache = if let Some(cache) = cache { cache.into_iter().collect() } else { Default::default() }; - self.block_import.import_block(import_block, cache).expect("block_import failed"); + 
futures::executor::block_on( + self.block_import.import_block(import_block, cache) + ).expect("block_import failed"); if announce_block { self.network.service().announce_block(hash, None); } @@ -449,6 +489,11 @@ impl Peer { &self.network.service() } + /// Get a reference to the network worker. + pub fn network(&self) -> &NetworkWorker::Hash> { + &self.network + } + /// Test helper to compare the blockchain state of multiple (networked) /// clients. pub fn blockchain_canon_equals(&self, other: &Self) -> bool { @@ -478,102 +523,80 @@ impl Peer { } } +pub trait BlockImportAdapterFull: + BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + + Send + + Sync + + Clone +{} + +impl BlockImportAdapterFull for T where + T: BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + + Send + + Sync + + Clone +{} + /// Implements `BlockImport` for any `Transaction`. Internally the transaction is /// "converted", aka the field is set to `None`. /// /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. -pub enum BlockImportAdapter { - Full( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), - Light( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), +#[derive(Clone)] +pub struct BlockImportAdapter { + inner: I, } -impl BlockImportAdapter { +impl BlockImportAdapter { /// Create a new instance of `Self::Full`. - pub fn new_full( - full: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Full(Arc::new(Mutex::new(full)), PhantomData) - } - - /// Create a new instance of `Self::Light`. 
- pub fn new_light( - light: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Light(Arc::new(Mutex::new(light)), PhantomData) - } -} - -impl Clone for BlockImportAdapter { - fn clone(&self) -> Self { - match self { - Self::Full(full, _) => Self::Full(full.clone(), PhantomData), - Self::Light(light, _) => Self::Light(light.clone(), PhantomData), + pub fn new(inner: I) -> Self { + Self { + inner, } } } -impl BlockImport for BlockImportAdapter { +#[async_trait::async_trait] +impl BlockImport for BlockImportAdapter where + I: BlockImport + Send + Sync, + I::Transaction: Send, +{ type Error = ConsensusError; - type Transaction = Transaction; + type Transaction = (); - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - match self { - Self::Full(full, _) => full.lock().check_block(block), - Self::Light(light, _) => light.lock().check_block(block), - } + self.inner.check_block(block).await } - fn import_block( + async fn import_block( &mut self, - block: BlockImportParams, + block: BlockImportParams, cache: HashMap>, ) -> Result { - match self { - Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), - Self::Light(light, _) => light.lock().import_block(block.convert_transaction(), cache), - } + self.inner.import_block(block.convert_transaction(), cache).await } } -/// Implements `Verifier` on an `Arc>`. Used internally. -#[derive(Clone)] +/// Implements `Verifier` and keeps track of failed verifications. 
struct VerifierAdapter { - verifier: Arc>>>, + verifier: Arc>>>, failed_verifications: Arc>>, } +#[async_trait::async_trait] impl Verifier for VerifierAdapter { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, @@ -581,17 +604,26 @@ impl Verifier for VerifierAdapter { body: Option> ) -> Result<(BlockImportParams, Option)>>), String> { let hash = header.hash(); - self.verifier.lock().verify(origin, header, justifications, body).map_err(|e| { + self.verifier.lock().await.verify(origin, header, justifications, body).await.map_err(|e| { self.failed_verifications.lock().insert(hash, e.clone()); e }) } } +impl Clone for VerifierAdapter { + fn clone(&self) -> Self { + Self { + verifier: self.verifier.clone(), + failed_verifications: self.failed_verifications.clone(), + } + } +} + impl VerifierAdapter { - fn new(verifier: Arc>>>) -> VerifierAdapter { + fn new(verifier: impl Verifier + 'static) -> Self { VerifierAdapter { - verifier, + verifier: Arc::new(futures::lock::Mutex::new(Box::new(verifier))), failed_verifications: Default::default(), } } @@ -614,8 +646,9 @@ pub struct FullPeerConfig { pub is_authority: bool, } -pub trait TestNetFactory: Sized { +pub trait TestNetFactory: Sized where >::Transaction: Send { type Verifier: 'static + Verifier; + type BlockImport: BlockImport + Clone + Send + Sync + 'static; type PeerData: Default; /// These two need to be implemented! @@ -628,23 +661,20 @@ pub trait TestNetFactory: Sized { ) -> Self::Verifier; /// Get reference to peer. - fn peer(&mut self, i: usize) -> &mut Peer; - fn peers(&self) -> &Vec>; - fn mut_peers>)>( + fn peer(&mut self, i: usize) -> &mut Peer; + fn peers(&self) -> &Vec>; + fn mut_peers>)>( &mut self, closure: F, ); /// Get custom block import handle for fresh client, along with peer data. 
- fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, Self::PeerData, - ) - { - (client.as_block_import(), None, Default::default()) - } + ); fn default_config() -> ProtocolConfig { ProtocolConfig::default() @@ -688,7 +718,7 @@ pub trait TestNetFactory: Sized { &Default::default(), &data, ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( verifier.clone(), @@ -776,7 +806,7 @@ pub trait TestNetFactory: Sized { peers.push(Peer { data, - client: PeersClient::Full(client, backend.clone()), + client: PeersClient::Full(client.clone(), backend.clone()), select_chain: Some(longest_chain), backend: Some(backend), imported_blocks_stream, @@ -804,7 +834,7 @@ pub trait TestNetFactory: Sized { &Default::default(), &data, ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let verifier = VerifierAdapter::new(verifier); let import_queue = Box::new(BasicQueue::new( verifier.clone(), @@ -960,12 +990,12 @@ pub trait TestNetFactory: Sized { /// Polls the testnet. Processes all the pending actions. fn poll(&mut self, cx: &mut FutureContext) { self.mut_peers(|peers| { - for peer in peers { - trace!(target: "sync", "-- Polling {}", peer.id()); + for (i, peer) in peers.into_iter().enumerate() { + trace!(target: "sync", "-- Polling {}: {}", i, peer.id()); if let Poll::Ready(()) = peer.network.poll_unpin(cx) { panic!("NetworkWorker has terminated unexpectedly.") } - trace!(target: "sync", "-- Polling complete {}", peer.id()); + trace!(target: "sync", "-- Polling complete {}: {}", i, peer.id()); // We poll `imported_blocks_stream`. 
while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { @@ -986,7 +1016,7 @@ pub trait TestNetFactory: Sized { } pub struct TestNet { - peers: Vec>, + peers: Vec>, fork_choice: ForkChoiceStrategy, } @@ -1003,6 +1033,7 @@ impl TestNet { impl TestNetFactory for TestNet { type Verifier = PassThroughVerifier; type PeerData = (); + type BlockImport = PeersClient; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { @@ -1018,15 +1049,25 @@ impl TestNetFactory for TestNet { PassThroughVerifier::new_with_fork_choice(false, self.fork_choice.clone()) } - fn peer(&mut self, i: usize) -> &mut Peer<()> { + fn make_block_import(&self, client: PeersClient) + -> ( + BlockImportAdapter, + Option>, + Self::PeerData, + ) + { + (client.as_block_import(), None, ()) + } + + fn peer(&mut self, i: usize) -> &mut Peer<(), Self::BlockImport> { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -1052,6 +1093,7 @@ pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { type Verifier = PassThroughVerifier; type PeerData = (); + type BlockImport = PeersClient; fn from_config(config: &ProtocolConfig) -> Self { JustificationTestNet(TestNet::from_config(config)) @@ -1061,23 +1103,23 @@ impl TestNetFactory for JustificationTestNet { self.0.make_verifier(client, config, peer_data) } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut Peer { self.0.peer(i) } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { self.0.peers() } fn mut_peers>, + &mut Vec>, )>(&mut self, closure: F) { self.0.mut_peers(closure) } - fn make_block_import(&self, client: PeersClient) + fn make_block_import(&self, client: PeersClient) -> ( - BlockImportAdapter, + BlockImportAdapter, Option>, 
Self::PeerData, ) diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 13a07973be95d..979dc880d795d 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -743,6 +743,27 @@ impl BlockAnnounceValidator for NewBestBlockAnnounceValidator { } } +/// Returns `Validation::Failure` for specified block number +struct FailingBlockAnnounceValidator(u64); + +impl BlockAnnounceValidator for FailingBlockAnnounceValidator { + fn validate( + &mut self, + header: &Header, + _: &[u8], + ) -> Pin>> + Send>> { + let number = *header.number(); + let target_number = self.0; + async move { Ok( + if number == target_number { + Validation::Failure { disconnect: false } + } else { + Validation::Success { is_new_best: true } + } + ) }.boxed() + } +} + #[test] fn sync_blocks_when_block_announce_validator_says_it_is_new_best() { sp_tracing::try_init_simple(); @@ -1016,3 +1037,59 @@ fn multiple_requests_are_accepted_as_long_as_they_are_not_fulfilled() { Poll::Ready(()) })); } + +#[test] +fn syncs_all_forks_from_single_peer() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(2); + net.peer(0).push_blocks(10, false); + net.peer(1).push_blocks(10, false); + + // poll until the two nodes connect, otherwise announcing the block will not work + net.block_until_connected(); + + // Peer 0 produces new blocks and announces. 
+ let branch1 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, true); + + // Wait till peer 1 starts downloading + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).network().best_seen_block() != Some(12) { + return Poll::Pending + } + Poll::Ready(()) + })); + + // Peer 0 produces and announces another fork + let branch2 = net.peer(0).push_blocks_at(BlockId::Number(10), 2, false); + + net.block_until_sync(); + + // Peer 1 should have both branches, + assert!(net.peer(1).client().header(&BlockId::Hash(branch1)).unwrap().is_some()); + assert!(net.peer(1).client().header(&BlockId::Hash(branch2)).unwrap().is_some()); +} + +#[test] +fn syncs_after_missing_announcement() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(0); + net.add_full_peer_with_config(Default::default()); + // Set peer 1 to ignore announcement + net.add_full_peer_with_config(FullPeerConfig { + block_announce_validator: Some(Box::new(FailingBlockAnnounceValidator(11))), + ..Default::default() + }); + net.peer(0).push_blocks(10, false); + net.peer(1).push_blocks(10, false); + + net.block_until_connected(); + + // Peer 0 produces a new block and announces. Peer 1 ignores announcement. + net.peer(0).push_blocks_at(BlockId::Number(10), 1, false); + // Peer 0 produces another block and announces. + let final_block = net.peer(0).push_blocks_at(BlockId::Number(11), 1, false); + net.peer(1).push_blocks_at(BlockId::Number(10), 1, true); + net.block_until_sync(); + assert!(net.peer(1).client().header(&BlockId::Hash(final_block)).unwrap().is_some()); +} diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index dbe8e55b3646b..f03f7a93b856c 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -183,10 +183,7 @@ impl HttpApi { ) -> Result<(), HttpError> { // Extract the request from the list. // Don't forget to add it back if necessary when returning. 
- let mut request = match self.requests.remove(&request_id) { - None => return Err(HttpError::Invalid), - Some(r) => r, - }; + let mut request = self.requests.remove(&request_id).ok_or_else(|| HttpError::Invalid)?; let mut deadline = timestamp::deadline_to_future(deadline); // Closure that writes data to a sender, taking the deadline into account. Can return `Ok` diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 717f02eccd5dc..26975edbd6b63 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -240,6 +240,7 @@ mod tests { use sp_consensus::BlockOrigin; use sc_client_api::Backend as _; use sc_block_builder::BlockBuilderProvider as _; + use futures::executor::block_on; struct TestNetwork(); @@ -331,7 +332,7 @@ mod tests { ).unwrap(); let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(value, &offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).unwrap()); @@ -341,7 +342,7 @@ mod tests { ).unwrap(); let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert!(offchain_db.get(sp_offchain::STORAGE_PREFIX, &key).is_none()); } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 984bfc5e835ff..5910116ec01c1 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" -libp2p = { version = "0.36.0", default-features = false } +libp2p = { version = "0.37.1", default-features = false } sp-utils = { version = "3.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index c200d2729e16c..309c7e6b8f973 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ 
-272,15 +272,11 @@ impl PeersState { }) .map(|(peer_id, _)| peer_id.clone()); - if let Some(peer_id) = outcome { - Some(NotConnectedPeer { + outcome.map(move |peer_id| NotConnectedPeer { state: self, set, peer_id: Cow::Owned(peer_id), }) - } else { - None - } } /// Returns `true` if there is a free outgoing slot available related to this set. diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index 025ff53c2fa95..bb673d65ea0f2 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -67,7 +67,7 @@ fn should_return_a_block() { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let block_hash = block.hash(); - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Genesis block is not justified assert_matches!( @@ -133,7 +133,7 @@ fn should_return_block_hash() { ); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); assert_matches!( api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), @@ -167,7 +167,7 @@ fn should_return_finalized_hash() { // import new block let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); // no finalization yet assert_matches!( api.finalized_head(), @@ -199,7 +199,7 @@ fn should_notify_about_latest_block() { )); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } // assert initial head sent. 
@@ -229,7 +229,7 @@ fn should_notify_about_best_block() { )); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } // assert initial head sent. @@ -259,7 +259,7 @@ fn should_notify_about_finalized_block() { )); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); client.finalize_block(BlockId::number(1), None).unwrap(); } diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index c8c921345877c..4bc4b0772784d 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -722,13 +722,10 @@ fn maybe_share_remote_request(future: F) -> impl std::future::Future> where F: std::future::Future> { - future.then(|result| ready(match result { - Ok(result) => Ok(result), - Err(err) => { + future.then(|result| ready(result.or_else(|err| { warn!("Remote request for subscription data has failed with: {:?}", err); Err(()) - }, - })) + }))) } /// Convert successful future result into Ok(Some(result)) and error into Ok(None), diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 87b0fae1d6b3c..b5d30b3413903 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -180,7 +180,7 @@ fn should_notify_about_storage_changes() { nonce: 0, }).unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } // assert notification sent to transport @@ -222,7 +222,7 @@ fn should_send_initial_storage_changes_and_notifications() { nonce: 0, }).unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + 
executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); } // assert initial values sent to transport @@ -258,7 +258,7 @@ fn should_query_storage() { builder.push_storage_change(vec![5], Some(vec![nonce as u8])).unwrap(); let block = builder.build().unwrap().block; let hash = block.header.hash(); - client.import(BlockOrigin::Own, block).unwrap(); + executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); hash }; let block1_hash = add_block(0); diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 6ce1ed8b34e14..cff05390d7874 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -78,6 +78,7 @@ sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } tracing = "0.1.25" tracing-futures = { version = "0.2.4" } parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } +async-trait = "0.1.42" [target.'cfg(not(target_os = "unknown"))'.dependencies] tempfile = "3.1.0" diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a39c456641920..f05a2751995da 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -585,10 +585,8 @@ impl Client where &dyn PrunableStateChangesTrieStorage, Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, )> { - let storage = match self.backend.changes_trie_storage() { - Some(storage) => storage, - None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), - }; + let storage = self.backend.changes_trie_storage() + .ok_or_else(|| sp_blockchain::Error::ChangesTriesNotSupported)?; let mut configs = Vec::with_capacity(1); let mut current = last; @@ -1153,10 +1151,8 @@ impl Client where /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. 
pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { let load_header = |id: Block::Hash| -> sp_blockchain::Result { - match self.backend.blockchain().header(BlockId::Hash(id))? { - Some(hdr) => Ok(hdr), - None => Err(Error::UnknownBlock(format!("{:?}", id))), - } + self.backend.blockchain().header(BlockId::Hash(id))? + .ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))) }; let genesis_hash = self.backend.blockchain().info().genesis_hash; @@ -1698,6 +1694,7 @@ impl CallApiAt for Client where /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. +#[async_trait::async_trait] impl sp_consensus::BlockImport for &Client where B: backend::Backend, E: CallExecutor + Send + Sync, @@ -1705,6 +1702,8 @@ impl sp_consensus::BlockImport for &Client: ProvideRuntimeApi, as ProvideRuntimeApi>::Api: CoreApi + ApiExt, + RA: Sync + Send, + backend::TransactionFor: Send + 'static, { type Error = ConsensusError; type Transaction = backend::TransactionFor; @@ -1718,7 +1717,7 @@ impl sp_consensus::BlockImport for &Client>, new_cache: HashMap>, @@ -1742,7 +1741,7 @@ impl sp_consensus::BlockImport for &Client, ) -> Result { @@ -1798,6 +1797,7 @@ impl sp_consensus::BlockImport for &Client sp_consensus::BlockImport for Client where B: backend::Backend, E: CallExecutor + Send + Sync, @@ -1805,23 +1805,25 @@ impl sp_consensus::BlockImport for Client, >::Api: CoreApi + ApiExt, + RA: Sync + Send, + backend::TransactionFor: Send + 'static, { type Error = ConsensusError; type Transaction = backend::TransactionFor; - fn import_block( + async fn import_block( &mut self, import_block: BlockImportParams, new_cache: HashMap>, ) -> Result { - (&*self).import_block(import_block, new_cache) + (&*self).import_block(import_block, new_cache).await } - fn check_block( + async fn check_block( &mut 
self, block: BlockCheckParams, ) -> Result { - (&*self).check_block(block) + (&*self).check_block(block).await } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 4ca784558dbf3..db5f296953e3f 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -391,9 +391,8 @@ fn start_rpc_servers< ) -> Result, error::Error> { fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> where F: FnMut(&SocketAddr) -> Result, - { - Ok(match address { - Some(mut address) => Some(start(&address) + { + address.map(|mut address| start(&address) .or_else(|e| match e.kind() { io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => { @@ -402,10 +401,9 @@ fn start_rpc_servers< start(&address) }, _ => Err(e), - })?), - None => None, - }) - } + } + ) ).transpose() + } fn deny_unsafe(addr: &SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { let is_exposed_addr = !addr.ip().is_loopback(); diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index e55320d6c5fb7..2108d7e26fa83 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -28,7 +28,7 @@ sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } sp-storage = { version = "3.0.0", path = "../../../primitives/storage" } sc-client-db = { version = "0.9.0", default-features = false, path = "../../db" } futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.9.0", default-features = false, features = ["test-helpers"], path = "../../service" } +sc-service = { version = "0.9.0", features = ["test-helpers"], path = "../../service" } sc-network = { version = "0.9.0", path = "../../network" } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index 02d54a24c3135..a183cbce62bdb 100644 --- 
a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -375,11 +375,11 @@ fn execution_proof_is_generated_and_checked() { for i in 1u32..3u32 { let mut digest = Digest::default(); digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); - remote_client.import_justified( + futures::executor::block_on(remote_client.import_justified( BlockOrigin::Own, remote_client.new_block(digest).unwrap().build().unwrap().block, Justifications::from((*b"TEST", Default::default())), - ).unwrap(); + )).unwrap(); } // check method that doesn't requires environment @@ -540,7 +540,7 @@ fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Heade let mut local_headers_hashes = Vec::new(); for i in 0..4 { let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); local_headers_hashes.push( remote_client.block_hash(i + 1) .map_err(|_| ClientError::Backend("TestError".into())) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index d8a09734bebb6..0234f43513d56 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -54,6 +54,7 @@ use sp_storage::StorageKey; use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_runtime::{generic::BlockId, DigestItem, Justifications}; use hex_literal::hex; +use futures::executor::block_on; mod light; mod db; @@ -108,7 +109,7 @@ pub fn prepare_client_with_key_changes() -> ( }).unwrap(); } let block = builder.build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); + block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); let trie_root = header.digest().log(DigestItem::as_changes_trie_root) @@ -363,7 
+364,7 @@ fn block_builder_works_with_no_transactions() { let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); } @@ -382,7 +383,7 @@ fn block_builder_works_with_transactions() { }).unwrap(); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); assert_ne!( @@ -428,7 +429,7 @@ fn block_builder_does_not_include_invalid() { ); let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); assert_eq!(client.chain_info().best_number, 1); assert_ne!( @@ -476,11 +477,11 @@ fn uncles_with_only_ancestors() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let v: Vec = Vec::new(); assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); } @@ -496,7 +497,7 @@ fn uncles_with_multiple_forks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -504,7 +505,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 
let a3 = client.new_block_at( @@ -512,7 +513,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 let a4 = client.new_block_at( @@ -520,7 +521,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 let a5 = client.new_block_at( @@ -528,7 +529,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 let mut builder = client.new_block_at( @@ -544,7 +545,7 @@ fn uncles_with_multiple_forks() { nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 let b3 = client.new_block_at( @@ -552,7 +553,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 let b4 = client.new_block_at( @@ -560,7 +561,7 @@ fn uncles_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 let mut builder = client.new_block_at( @@ -576,7 +577,7 @@ fn uncles_with_multiple_forks() { nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 let mut 
builder = client.new_block_at( @@ -592,7 +593,7 @@ fn uncles_with_multiple_forks() { nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -624,11 +625,11 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -648,7 +649,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -656,7 +657,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 let a3 = client.new_block_at( @@ -664,7 +665,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 let a4 = client.new_block_at( @@ -672,7 +673,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - 
client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 let a5 = client.new_block_at( @@ -680,7 +681,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 let mut builder = client.new_block_at( @@ -696,7 +697,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 let b3 = client.new_block_at( @@ -704,7 +705,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 let b4 = client.new_block_at( @@ -712,7 +713,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); // // B2 -> C3 let mut builder = client.new_block_at( @@ -728,7 +729,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 let mut builder = client.new_block_at( @@ -744,7 +745,7 @@ fn best_containing_on_longest_chain_with_multiple_forks() { nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); 
assert_eq!(client.chain_info().best_hash, a5.hash()); @@ -952,11 +953,15 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert_eq!(None, longest_chain_select.finality_target( b4.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - c3.hash().clone(), Some(0)).unwrap()); + assert_eq!( + None, + longest_chain_select.finality_target(c3.hash().clone(), Some(0)).unwrap(), + ); - assert_eq!(None, longest_chain_select.finality_target( - d2.hash().clone(), Some(0)).unwrap()); + assert_eq!( + None, + longest_chain_select.finality_target(d2.hash().clone(), Some(0)).unwrap(), + ); } #[test] @@ -968,15 +973,18 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); + assert_eq!( + a2.hash(), + longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap(), + ); } #[test] @@ -1008,7 +1016,7 @@ fn import_with_justification() { // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -1016,7 +1024,7 @@ fn import_with_justification() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 
let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); @@ -1025,7 +1033,7 @@ fn import_with_justification() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); + block_on(client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone())).unwrap(); assert_eq!( client.chain_info().finalized_hash, @@ -1060,14 +1068,14 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1092,7 +1100,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { // importing B1 as finalized should trigger a re-org and set it as new best let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); - client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); + block_on(client.import_justified(BlockOrigin::Own, b1.clone(), justification)).unwrap(); assert_eq!( client.chain_info().best_hash, @@ -1117,14 +1125,14 @@ fn finalizing_diverged_block_should_trigger_reorg() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at( 
&BlockId::Number(0), @@ -1139,14 +1147,14 @@ fn finalizing_diverged_block_should_trigger_reorg() { nonce: 0, }).unwrap(); let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client.new_block_at( &BlockId::Hash(b1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // A2 is the current best since it's the longest chain assert_eq!( @@ -1184,7 +1192,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); assert_eq!( client.chain_info().best_hash, @@ -1227,7 +1235,7 @@ fn state_reverted_on_reorg() { nonce: 0, }).unwrap(); let a1 = a1.build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1242,7 +1250,7 @@ fn state_reverted_on_reorg() { }).unwrap(); let b1 = b1.build().unwrap().block; // Reorg to B1 - client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import_as_best(BlockOrigin::Own, b1.clone())).unwrap(); assert_eq!(950, current_balance(&client)); let mut a2 = client.new_block_at( @@ -1258,7 +1266,7 @@ fn state_reverted_on_reorg() { }).unwrap(); let a2 = a2.build().unwrap().block; // Re-org to A2 - client.import_as_best(BlockOrigin::Own, a2).unwrap(); + block_on(client.import_as_best(BlockOrigin::Own, a2)).unwrap(); assert_eq!(980, current_balance(&client)); } @@ -1297,14 +1305,14 @@ fn doesnt_import_blocks_that_revert_finality() { Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + 
block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1316,11 +1324,11 @@ fn doesnt_import_blocks_that_revert_finality() { nonce: 0, }).unwrap(); let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b1.clone())).unwrap(); let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // prepare B3 before we finalize A2, because otherwise we won't be able to // read changes trie configuration after A2 is finalized @@ -1331,7 +1339,7 @@ fn doesnt_import_blocks_that_revert_finality() { // B3 at the same height but that doesn't include it ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); + let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); let expected_err = ConsensusError::ClientImport( sp_blockchain::Error::RuntimeApiError( sp_api::ApiError::Application(Box::new(sp_blockchain::Error::NotInFinalizedChain)) @@ -1356,7 +1364,7 @@ fn doesnt_import_blocks_that_revert_finality() { }).unwrap(); let c1 = c1.build().unwrap().block; - let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); + let import_err = block_on(client.import(BlockOrigin::Own, c1)).err().unwrap(); let expected_err = ConsensusError::ClientImport( sp_blockchain::Error::NotInFinalizedChain.to_string() ); @@ -1367,7 +1375,6 @@ fn doesnt_import_blocks_that_revert_finality() { ); } 
- #[test] fn respects_block_rules() { fn run_test( @@ -1396,7 +1403,7 @@ fn respects_block_rules() { allow_missing_state: false, import_existing: false, }; - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) @@ -1414,11 +1421,11 @@ fn respects_block_rules() { if record_only { known_bad.insert(block_not_ok.hash()); } else { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::KnownBad); } // Now going to the fork - client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, block_ok)).unwrap(); // And check good fork let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) @@ -1436,7 +1443,7 @@ fn respects_block_rules() { if record_only { fork_rules.push((1, block_ok.hash().clone())); } - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); // And now try bad fork let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) @@ -1453,7 +1460,7 @@ fn respects_block_rules() { }; if !record_only { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::KnownBad); } } @@ -1491,8 +1498,11 @@ fn returns_status_for_pruned_blocks() { let mut client = TestClientBuilder::with_backend(backend).build(); - let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; + let a1 = client.new_block_at( + &BlockId::Number(0), + 
Default::default(), + false, + ).unwrap().build().unwrap().block; let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); @@ -1513,17 +1523,32 @@ fn returns_status_for_pruned_blocks() { import_existing: false, }; - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::imported(false), + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::Unknown, + ); - client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, a1.clone())).unwrap(); - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainWithState, + ); - let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); + let a2 = client.new_block_at( + &BlockId::Hash(a1.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; + block_on(client.import_as_final(BlockOrigin::Own, a2.clone())).unwrap(); let check_block_a2 = BlockCheckParams { hash: a2.hash().clone(), @@ -1533,15 +1558,30 @@ fn returns_status_for_pruned_blocks() { import_existing: false, }; - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), 
BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a2.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), + BlockStatus::InChainWithState, + ); - let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; + let a3 = client.new_block_at( + &BlockId::Hash(a2.hash()), + Default::default(), + false, + ).unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import_as_final(BlockOrigin::Own, a3.clone())).unwrap(); let check_block_a3 = BlockCheckParams { hash: a3.hash().clone(), number: 2, @@ -1551,12 +1591,30 @@ fn returns_status_for_pruned_blocks() { }; // a1 and a2 are both pruned at this point - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); + assert_eq!( + block_on(client.check_block(check_block_a1.clone())).unwrap(), + 
ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a2.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), + BlockStatus::InChainPruned, + ); + assert_eq!( + block_on(client.check_block(check_block_a3.clone())).unwrap(), + ImportResult::AlreadyInChain, + ); + assert_eq!( + client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), + BlockStatus::InChainWithState, + ); let mut check_block_b1 = BlockCheckParams { hash: b1.hash().clone(), @@ -1565,11 +1623,20 @@ fn returns_status_for_pruned_blocks() { allow_missing_state: false, import_existing: false, }; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::MissingState, + ); check_block_b1.allow_missing_state = true; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::imported(false), + ); check_block_b1.parent_hash = H256::random(); - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); + assert_eq!( + block_on(client.check_block(check_block_b1.clone())).unwrap(), + ImportResult::UnknownParent, + ); } #[test] @@ -1600,18 +1667,18 @@ fn imports_blocks_with_changes_tries_config_change() { (1..11).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (11..12).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), 
Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (12..23).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (23..24).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); @@ -1620,24 +1687,24 @@ fn imports_blocks_with_changes_tries_config_change() { digest_levels: 1, })).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (24..26).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (26..27).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (27..28).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (28..29).for_each(|number| { let mut 
block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); @@ -1646,23 +1713,23 @@ fn imports_blocks_with_changes_tries_config_change() { digest_levels: 1, })).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (29..30).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (30..31).for_each(|number| { let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); (31..32).for_each(|number| { let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); }); // now check that configuration cache works @@ -1778,7 +1845,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - client.import_block(import, Default::default()).unwrap(); + block_on(client.import_block(import, Default::default())).unwrap(); }; // after importing a block we should still have 4 notification sinks @@ -1821,14 +1888,14 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, a1.clone()).unwrap(); + 
block_on(client.import(BlockOrigin::NetworkInitialSync, a1.clone())).unwrap(); let a2 = client.new_block_at( &BlockId::Hash(a1.hash()), Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::NetworkInitialSync, a2.clone())).unwrap(); let mut b1 = client.new_block_at( &BlockId::Number(0), @@ -1843,7 +1910,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi nonce: 0, }).unwrap(); let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::NetworkInitialSync, b1.clone()).unwrap(); + block_on(client.import(BlockOrigin::NetworkInitialSync, b1.clone())).unwrap(); let b2 = client.new_block_at( &BlockId::Hash(b1.hash()), @@ -1852,7 +1919,7 @@ fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifi ).unwrap().build().unwrap().block; // Should trigger a notification because we reorg - client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone()).unwrap(); + block_on(client.import_as_best(BlockOrigin::NetworkInitialSync, b2.clone())).unwrap(); // There should be one notification let notification = notification_stream.next().unwrap(); diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 1f73f3cca35e9..8961f2549b2d8 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -16,16 +16,24 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! State database maintenance. Handles canonicalization and pruning in the database. The input to -//! this module is a `ChangeSet` which is basically a list of key-value pairs (trie nodes) that -//! were added or deleted during block execution. +//! State database maintenance. Handles canonicalization and pruning in the database. //! //! # Canonicalization. //! Canonicalization window tracks a tree of blocks identified by header hash. The in-memory -//! 
overlay allows to get any node that was inserted in any of the blocks within the window. -//! The tree is journaled to the backing database and rebuilt on startup. +//! overlay allows to get any trie node that was inserted in any of the blocks within the window. +//! The overlay is journaled to the backing database and rebuilt on startup. +//! There's a limit of 32 blocks that may have the same block number in the canonicalization window. +//! //! Canonicalization function selects one root from the top of the tree and discards all other roots -//! and their subtrees. +//! and their subtrees. Upon canonicalization all trie nodes that were inserted in the block are added to +//! the backing DB and block tracking is moved to the pruning window, where no forks are allowed. +//! +//! # Canonicalization vs Finality +//! Database engine uses a notion of canonicality, rather then finality. A canonical block may not be yet finalized +//! from the perspective of the consensus engine, but it still can't be reverted in the database. Most of the time +//! during normal operation last canonical block is the same as last finalized. However if finality stall for a +//! long duration for some reason, there's only a certain number of blocks that can fit in the non-canonical overlay, +//! so canonicalization of an unfinalized block may be forced. //! //! # Pruning. //! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until @@ -89,6 +97,8 @@ pub enum Error { InvalidParent, /// Invalid pruning mode specified. Contains expected mode. InvalidPruningMode(String), + /// Too many unfinalized sibling blocks inserted. + TooManySiblingBlocks, } /// Pinning error type. 
@@ -112,6 +122,7 @@ impl fmt::Debug for Error { Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), Error::InvalidPruningMode(e) => write!(f, "Expected pruning mode: {}", e), + Error::TooManySiblingBlocks => write!(f, "Too many sibling blocks inserted"), } } } diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 551bf5fb860c7..3f0c7d132f746 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -30,6 +30,7 @@ use log::trace; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; +const MAX_BLOCKS_PER_LEVEL: u64 = 32; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] @@ -149,10 +150,8 @@ impl NonCanonicalOverlay { pub fn new(db: &D) -> Result, Error> { let last_canonicalized = db.get_meta(&to_meta_key(LAST_CANONICAL, &())) .map_err(|e| Error::Db(e))?; - let last_canonicalized = match last_canonicalized { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?), - None => None, - }; + let last_canonicalized = last_canonicalized + .map(|buffer| <(BlockHash, u64)>::decode(&mut buffer.as_slice())).transpose()?; let mut levels = VecDeque::new(); let mut parents = HashMap::new(); let mut values = HashMap::new(); @@ -162,28 +161,30 @@ impl NonCanonicalOverlay { let mut total: u64 = 0; block += 1; loop { - let mut index: u64 = 0; let mut level = Vec::new(); - loop { + for index in 0 .. MAX_BLOCKS_PER_LEVEL { let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; - }, - None => break, + if let Some(record) = db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + let record: JournalRecord = Decode::decode(&mut record.as_slice())?; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!( + target: "state-db", + "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", + block, + index, + overlay.inserted.len(), + overlay.deleted.len() + ); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + total += 1; } } if level.is_empty() { @@ -241,6 +242,10 @@ impl NonCanonicalOverlay { .expect("number is [front_block_number .. 
front_block_number + levels.len()) is asserted in precondition; qed") }; + if level.len() >= MAX_BLOCKS_PER_LEVEL as usize { + return Err(Error::TooManySiblingBlocks); + } + let index = level.len() as u64; let journal_key = to_journal_key(number, index); @@ -513,7 +518,7 @@ mod tests { use std::io; use sp_core::H256; use super::{NonCanonicalOverlay, to_journal_key}; - use crate::{ChangeSet, CommitSet}; + use crate::{ChangeSet, CommitSet, MetaDb}; use crate::test::{make_db, make_changeset}; fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { @@ -716,7 +721,6 @@ mod tests { #[test] fn complex_tree() { - use crate::MetaDb; let mut db = make_db(&[]); // - 1 - 1_1 - 1_1_1 @@ -958,4 +962,42 @@ mod tests { assert!(!contains(&overlay, 1)); assert!(overlay.pinned.is_empty()); } + + #[test] + fn restore_from_journal_after_canonicalize_no_first() { + // This test discards a branch that is journaled under a non-zero index on level 1, + // making sure all journals are loaded for each level even if some of them are missing. 
+ let root = H256::random(); + let h1 = H256::random(); + let h2 = H256::random(); + let h11 = H256::random(); + let h21 = H256::random(); + let mut db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit(&overlay.insert::(&root, 10, &H256::default(), make_changeset(&[], &[])).unwrap()); + db.commit(&overlay.insert::(&h1, 11, &root, make_changeset(&[1], &[])).unwrap()); + db.commit(&overlay.insert::(&h2, 11, &root, make_changeset(&[2], &[])).unwrap()); + db.commit(&overlay.insert::(&h11, 12, &h1, make_changeset(&[11], &[])).unwrap()); + db.commit(&overlay.insert::(&h21, 12, &h2, make_changeset(&[21], &[])).unwrap()); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&root, &mut commit).unwrap(); + overlay.canonicalize::(&h2, &mut commit).unwrap(); // h11 should stay in the DB + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + assert!(contains(&overlay, 21)); + assert!(!contains(&overlay, 11)); + assert!(db.get_meta(&to_journal_key(12, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(12, 0)).unwrap().is_none()); + + // Restore into a new overlay and check that journaled value exists. 
+ let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + assert!(contains(&overlay, 21)); + + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h21, &mut commit).unwrap(); // h11 should stay in the DB + db.commit(&commit); + overlay.apply_pending(); + assert!(!contains(&overlay, 21)); + } } diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index 8466523116440..4cb4955995540 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ -37,7 +37,7 @@ type SharedEpochChanges = sc_consensus_epochs::SharedEpochChanges { #[error(transparent)] Blockchain(#[from] sp_blockchain::Error), - + #[error("Failed to load the block weight for block {0:?}")] LoadingBlockWeightFailed(::Hash), @@ -94,7 +94,7 @@ impl SyncStateRpcHandler chain_spec, client, shared_authority_set, shared_epoch_changes, deny_unsafe, } } - + fn build_sync_state(&self) -> Result, Error> { let finalized_hash = self.client.info().finalized_hash; let finalized_header = self.client.header(BlockId::Hash(finalized_hash))? 
@@ -108,7 +108,7 @@ impl SyncStateRpcHandler Ok(sc_chain_spec::LightSyncState { finalized_block_header: finalized_header, - babe_epoch_changes: self.shared_epoch_changes.lock().clone(), + babe_epoch_changes: self.shared_epoch_changes.shared_data().clone(), babe_finalized_block_weight: finalized_block_weight, grandpa_authority_set: self.shared_authority_set.clone_inner(), }) diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index ab02104c15c35..6e6ae408247a3 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = "0.11.1" futures = "0.3.9" wasm-timer = "0.2.5" -libp2p = { version = "0.36.0", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.37.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "1.0.4" rand = "0.7.2" diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 8d3b605db01a5..5c233d54903dc 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -389,10 +389,7 @@ impl Telemetry { /// The `connection_message` argument is a JSON object that is sent every time the connection /// (re-)establishes. 
pub fn start_telemetry(&mut self, connection_message: ConnectionMessage) -> Result<()> { - let endpoints = match self.endpoints.take() { - Some(x) => x, - None => return Err(Error::TelemetryAlreadyInitialized), - }; + let endpoints = self.endpoints.take().ok_or_else(|| Error::TelemetryAlreadyInitialized)?; self.register_sender .unbounded_send(Register::Telemetry { diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index 41947d4c0ed8e..54620d30bb561 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -246,7 +246,7 @@ fn parse_target(s: &str) -> (String, Level) { Some(i) => { let target = s[0..i].to_string(); if s.len() > i { - let level = s[i + 1..s.len()].parse::().unwrap_or(Level::TRACE); + let level = s[i + 1..].parse::().unwrap_or(Level::TRACE); (target, level) } else { (target, Level::TRACE) diff --git a/client/tracing/src/logging/event_format.rs b/client/tracing/src/logging/event_format.rs index 25fd2f3ba3d70..5e7a5246cca00 100644 --- a/client/tracing/src/logging/event_format.rs +++ b/client/tracing/src/logging/event_format.rs @@ -43,6 +43,8 @@ pub struct EventFormat { pub display_thread_name: bool, /// Enable ANSI terminal colors for formatted output. pub enable_color: bool, + /// Duplicate INFO, WARN and ERROR messages to stdout. 
+ pub dup_to_stdout: bool, } impl EventFormat @@ -123,7 +125,19 @@ where writer: &mut dyn fmt::Write, event: &Event, ) -> fmt::Result { - self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) + if self.dup_to_stdout && ( + event.metadata().level() == &Level::INFO || + event.metadata().level() == &Level::WARN || + event.metadata().level() == &Level::ERROR + ) { + let mut out = String::new(); + self.format_event_custom(CustomFmtContext::FmtContext(ctx), &mut out, event)?; + writer.write_str(&out)?; + print!("{}", out); + Ok(()) + } else { + self.format_event_custom(CustomFmtContext::FmtContext(ctx), writer, event) + } } } diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index 187b6a387f328..1023879e3d7f0 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -167,6 +167,7 @@ where display_level: !simple, display_thread_name: !simple, enable_color, + dup_to_stdout: !atty::is(atty::Stream::Stderr) && atty::is(atty::Stream::Stdout), }; let builder = FmtSubscriber::builder().with_env_filter(env_filter); diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index 445ef0adaf7b7..9b644bbdb3b67 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -155,13 +155,13 @@ impl Transaction { /// every reason to be commented. That's why we `Transaction` is not `Clone`, /// but there's explicit `duplicate` method. 
pub fn duplicate(&self) -> Self { - Transaction { + Self { data: self.data.clone(), - bytes: self.bytes.clone(), + bytes: self.bytes, hash: self.hash.clone(), - priority: self.priority.clone(), + priority: self.priority, source: self.source, - valid_till: self.valid_till.clone(), + valid_till: self.valid_till, requires: self.requires.clone(), provides: self.provides.clone(), propagate: self.propagate, @@ -174,16 +174,9 @@ impl fmt::Debug for Transaction where Extrinsic: fmt::Debug, { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fn print_tags(fmt: &mut fmt::Formatter, tags: &[Tag]) -> fmt::Result { - let mut it = tags.iter(); - if let Some(t) = it.next() { - write!(fmt, "{}", HexDisplay::from(t))?; - } - for t in it { - write!(fmt, ",{}", HexDisplay::from(t))?; - } - Ok(()) - } + let join_tags = |tags: &[Tag]| { + tags.iter().map(|tag| HexDisplay::from(tag).to_string()).collect::>().join(", ") + }; write!(fmt, "Transaction {{ ")?; write!(fmt, "hash: {:?}, ", &self.hash)?; @@ -192,11 +185,8 @@ impl fmt::Debug for Transaction where write!(fmt, "bytes: {:?}, ", &self.bytes)?; write!(fmt, "propagate: {:?}, ", &self.propagate)?; write!(fmt, "source: {:?}, ", &self.source)?; - write!(fmt, "requires: [")?; - print_tags(fmt, &self.requires)?; - write!(fmt, "], provides: [")?; - print_tags(fmt, &self.provides)?; - write!(fmt, "], ")?; + write!(fmt, "requires: [{}], ", join_tags(&self.requires))?; + write!(fmt, "provides: [{}], ", join_tags(&self.provides))?; write!(fmt, "data: {:?}", &self.data)?; write!(fmt, "}}")?; Ok(()) @@ -239,7 +229,7 @@ impl Default for Bas impl BasePool { /// Create new pool given reject_future_transactions flag. 
pub fn new(reject_future_transactions: bool) -> Self { - BasePool { + Self { reject_future_transactions, future: Default::default(), ready: Default::default(), @@ -320,13 +310,8 @@ impl BasePool tx, - None => break, - }; - + // take first transaction from the list + while let Some(tx) = to_import.pop() { // find transactions in Future that it unlocks to_import.append(&mut self.future.satisfy_tags(&tx.transaction.provides)); @@ -1087,7 +1072,7 @@ mod tests { }), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03,02], provides: [04], data: [4]}".to_owned() +source: TransactionSource::External, requires: [03, 02], provides: [04], data: [4]}".to_owned() ); } diff --git a/client/transaction-pool/graph/src/future.rs b/client/transaction-pool/graph/src/future.rs index 98d49817e32a8..9dcfd13808d9b 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/graph/src/future.rs @@ -47,24 +47,22 @@ impl fmt::Debug for WaitingTransaction>().join(", "), + )?; + write!(fmt, "}}") } } impl Clone for WaitingTransaction { fn clone(&self) -> Self { - WaitingTransaction { + Self { transaction: self.transaction.clone(), missing_tags: self.missing_tags.clone(), - imported_at: self.imported_at.clone(), + imported_at: self.imported_at, } } } @@ -90,7 +88,7 @@ impl WaitingTransaction { .cloned() .collect(); - WaitingTransaction { + Self { transaction: Arc::new(transaction), missing_tags, imported_at: Instant::now(), @@ -123,7 +121,7 @@ pub struct FutureTransactions { impl Default for FutureTransactions { fn default() -> Self { - FutureTransactions { + Self { wanted_tags: Default::default(), waiting: Default::default(), } diff --git a/client/transaction-pool/graph/src/listener.rs b/client/transaction-pool/graph/src/listener.rs index d707c0a0f802f..563243bf45945 100644 --- a/client/transaction-pool/graph/src/listener.rs +++ b/client/transaction-pool/graph/src/listener.rs @@ 
-20,12 +20,14 @@ use std::{ collections::HashMap, hash, fmt::Debug, }; + use linked_hash_map::LinkedHashMap; use serde::Serialize; -use crate::{watcher, ChainApi, ExtrinsicHash, BlockHash}; use log::{debug, trace, warn}; use sp_runtime::traits; +use crate::{watcher, ChainApi, ExtrinsicHash, BlockHash}; + /// Extrinsic pool default listener. pub struct Listener { watchers: HashMap>>, @@ -37,7 +39,7 @@ const MAX_FINALITY_WATCHERS: usize = 512; impl Default for Listener { fn default() -> Self { - Listener { + Self { watchers: Default::default(), finality_watchers: Default::default(), } @@ -115,7 +117,7 @@ impl Listener { while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { if let Some((hash, txs)) = self.finality_watchers.pop_front() { for tx in txs { - self.fire(&tx, |s| s.finality_timeout(hash.clone())); + self.fire(&tx, |s| s.finality_timeout(hash)); } } } diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index eee14049d41a6..7f9bc3c757f11 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -21,8 +21,6 @@ use std::{ sync::Arc, }; -use crate::{base_pool as base, watcher::Watcher}; - use futures::Future; use sp_runtime::{ generic::BlockId, @@ -35,6 +33,7 @@ use sp_transaction_pool::error; use wasm_timer::Instant; use futures::channel::mpsc::Receiver; +use crate::{base_pool as base, watcher::Watcher}; use crate::validated_pool::ValidatedPool; pub use crate::validated_pool::{IsValidator, ValidatedTransaction}; @@ -111,7 +110,7 @@ pub struct Options { impl Default for Options { fn default() -> Self { - Options { + Self { ready: base::Limit { count: 8192, total_bytes: 20 * 1024 * 1024, @@ -151,7 +150,7 @@ where impl Pool { /// Create a new transaction pool. 
pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { - Pool { + Self { validated_pool: Arc::new(ValidatedPool::new(options, is_validator, api)), } } @@ -193,7 +192,7 @@ impl Pool { res.expect("One extrinsic passed; one result returned; qed") } - /// Import a single extrinsic and starts to watch their progress in the pool. + /// Import a single extrinsic and starts to watch its progress in the pool. pub async fn submit_and_watch( &self, at: &BlockId, @@ -242,8 +241,8 @@ impl Pool { // Prune all transactions that provide given tags let prune_status = self.validated_pool.prune_tags(in_pool_tags)?; - let pruned_transactions = hashes.into_iter().cloned() - .chain(prune_status.pruned.iter().map(|tx| tx.hash.clone())); + let pruned_transactions = hashes.iter().cloned() + .chain(prune_status.pruned.iter().map(|tx| tx.hash)); self.validated_pool.fire_pruned(at, pruned_transactions) } @@ -323,10 +322,7 @@ impl Pool { ) -> Result<(), B::Error> { log::debug!(target: "txpool", "Pruning at {:?}", at); // Prune all transactions that provide given tags - let prune_status = match self.validated_pool.prune_tags(tags) { - Ok(prune_status) => prune_status, - Err(e) => return Err(e), - }; + let prune_status = self.validated_pool.prune_tags(tags)?; // Make sure that we don't revalidate extrinsics that were part of the recently // imported block. This is especially important for UTXO-like chains cause the @@ -337,7 +333,7 @@ impl Pool { // note that `known_imported_hashes` will be rejected here due to temporary ban. 
let pruned_hashes = prune_status.pruned .iter() - .map(|tx| tx.hash.clone()).collect::>(); + .map(|tx| tx.hash).collect::>(); let pruned_transactions = prune_status.pruned .into_iter() .map(|tx| (tx.source, tx.data.clone())); @@ -402,7 +398,7 @@ impl Pool { let ignore_banned = matches!(check, CheckBannedBeforeVerify::No); if let Err(err) = self.validated_pool.check_is_known(&hash, ignore_banned) { - return (hash.clone(), ValidatedTransaction::Invalid(hash, err.into())) + return (hash, ValidatedTransaction::Invalid(hash, err)) } let validation_result = self.validated_pool.api().validate_transaction( @@ -413,17 +409,17 @@ impl Pool { let status = match validation_result { Ok(status) => status, - Err(e) => return (hash.clone(), ValidatedTransaction::Invalid(hash, e)), + Err(e) => return (hash, ValidatedTransaction::Invalid(hash, e)), }; let validity = match status { Ok(validity) => { if validity.provides.is_empty() { - ValidatedTransaction::Invalid(hash.clone(), error::Error::NoTagsProvided.into()) + ValidatedTransaction::Invalid(hash, error::Error::NoTagsProvided.into()) } else { ValidatedTransaction::valid_at( block_number.saturated_into::(), - hash.clone(), + hash, source, xt, bytes, @@ -432,9 +428,9 @@ impl Pool { } }, Err(TransactionValidityError::Invalid(e)) => - ValidatedTransaction::Invalid(hash.clone(), error::Error::InvalidTransaction(e).into()), + ValidatedTransaction::Invalid(hash, error::Error::InvalidTransaction(e).into()), Err(TransactionValidityError::Unknown(e)) => - ValidatedTransaction::Unknown(hash.clone(), error::Error::UnknownTransaction(e).into()), + ValidatedTransaction::Unknown(hash, error::Error::UnknownTransaction(e).into()), }; (hash, validity) diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index c2af4f9cb9140..7946f49e6a17a 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -50,7 +50,7 @@ pub struct TransactionRef { impl Clone for 
TransactionRef { fn clone(&self) -> Self { - TransactionRef { + Self { transaction: self.transaction.clone(), insertion_id: self.insertion_id, } @@ -93,7 +93,7 @@ pub struct ReadyTx { impl Clone for ReadyTx { fn clone(&self) -> Self { - ReadyTx { + Self { transaction: self.transaction.clone(), unlocks: self.unlocks.clone(), requires_offset: self.requires_offset, @@ -128,7 +128,7 @@ impl tracked_map::Size for ReadyTx { impl Default for ReadyTransactions { fn default() -> Self { - ReadyTransactions { + Self { insertion_id: Default::default(), provided_tags: Default::default(), ready: Default::default(), @@ -259,7 +259,7 @@ impl ReadyTransactions { /// (i.e. the entire subgraph that this transaction is a start of will be removed). /// All removed transactions are returned. pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec>> { - let to_remove = hashes.iter().cloned().collect::>(); + let to_remove = hashes.to_vec(); self.remove_subtree_with_tag_filter(to_remove, None) } @@ -527,12 +527,9 @@ impl Iterator for BestIterator { satisfied += 1; Some((satisfied, tx_ref)) // then get from the pool - } else if let Some(next) = self.all.read().get(hash) { - Some((next.requires_offset + 1, next.transaction.clone())) } else { - None + self.all.read().get(hash).map(|next| (next.requires_offset + 1, next.transaction.clone())) }; - if let Some((satisfied, tx_ref)) = res { self.best_or_awaiting(satisfied, tx_ref) } diff --git a/client/transaction-pool/graph/src/rotator.rs b/client/transaction-pool/graph/src/rotator.rs index 3d9b359fd365f..4c800c767183a 100644 --- a/client/transaction-pool/graph/src/rotator.rs +++ b/client/transaction-pool/graph/src/rotator.rs @@ -48,7 +48,7 @@ pub struct PoolRotator { impl Default for PoolRotator { fn default() -> Self { - PoolRotator { + Self { ban_time: Duration::from_secs(60 * 30), banned_until: Default::default(), } @@ -78,7 +78,6 @@ impl PoolRotator { } } - /// Bans extrinsic if it's stale. 
/// /// Returns `true` if extrinsic is stale and got banned. diff --git a/client/transaction-pool/graph/src/tracked_map.rs b/client/transaction-pool/graph/src/tracked_map.rs index 9cd6ad84b483d..98fd9e21b3160 100644 --- a/client/transaction-pool/graph/src/tracked_map.rs +++ b/client/transaction-pool/graph/src/tracked_map.rs @@ -22,7 +22,7 @@ use std::{ }; use parking_lot::{RwLock, RwLockWriteGuard, RwLockReadGuard}; -/// Something that can report it's size. +/// Something that can report its size. pub trait Size { fn size(&self) -> usize; } @@ -64,14 +64,14 @@ impl TrackedMap { } /// Lock map for read. - pub fn read<'a>(&'a self) -> TrackedMapReadAccess<'a, K, V> { + pub fn read(&self) -> TrackedMapReadAccess { TrackedMapReadAccess { inner_guard: self.index.read(), } } /// Lock map for write. - pub fn write<'a>(&'a self) -> TrackedMapWriteAccess<'a, K, V> { + pub fn write(&self) -> TrackedMapWriteAccess { TrackedMapWriteAccess { inner_guard: self.index.write(), bytes: &self.bytes, @@ -90,7 +90,7 @@ where K: Eq + std::hash::Hash { /// Lock map for read. 
- pub fn read<'a>(&'a self) -> TrackedMapReadAccess<'a, K, V> { + pub fn read(&self) -> TrackedMapReadAccess { TrackedMapReadAccess { inner_guard: self.0.read(), } @@ -136,10 +136,10 @@ where let new_bytes = val.size(); self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed); self.length.fetch_add(1, AtomicOrdering::Relaxed); - self.inner_guard.insert(key, val).and_then(|old_val| { + self.inner_guard.insert(key, val).map(|old_val| { self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed); self.length.fetch_sub(1, AtomicOrdering::Relaxed); - Some(old_val) + old_val }) } @@ -186,4 +186,4 @@ mod tests { assert_eq!(map.bytes(), 1); assert_eq!(map.len(), 1); } -} \ No newline at end of file +} diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index c02aab47d8808..6042189e87e23 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -22,12 +22,7 @@ use std::{ sync::Arc, }; -use crate::base_pool as base; -use crate::listener::Listener; -use crate::rotator::PoolRotator; -use crate::watcher::Watcher; use serde::Serialize; - use parking_lot::{Mutex, RwLock}; use sp_runtime::{ generic::BlockId, @@ -39,7 +34,10 @@ use wasm_timer::Instant; use futures::channel::mpsc::{channel, Sender}; use retain_mut::RetainMut; -use crate::base_pool::PruneStatus; +use crate::base_pool::{self as base, PruneStatus}; +use crate::listener::Listener; +use crate::rotator::PoolRotator; +use crate::watcher::Watcher; use crate::pool::{ EventStream, Options, ChainApi, BlockHash, ExtrinsicHash, ExtrinsicFor, TransactionFor, }; @@ -95,13 +93,13 @@ pub struct IsValidator(Box bool + Send + Sync>); impl From for IsValidator { fn from(is_validator: bool) -> Self { - IsValidator(Box::new(move || is_validator)) + Self(Box::new(move || is_validator)) } } impl From bool + Send + Sync>> for IsValidator { fn from(is_validator: Box bool + Send + Sync>) -> 
Self { - IsValidator(is_validator) + Self(is_validator) } } @@ -134,7 +132,7 @@ impl ValidatedPool { /// Create a new transaction pool. pub fn new(options: Options, is_validator: IsValidator, api: Arc) -> Self { let base_pool = base::BasePool::new(options.reject_future_transactions); - ValidatedPool { + Self { is_validator, options, listener: Default::default(), @@ -168,7 +166,7 @@ impl ValidatedPool { if !ignore_banned && self.is_banned(tx_hash) { Err(error::Error::TemporarilyBanned.into()) } else if self.pool.read().is_imported(tx_hash) { - Err(error::Error::AlreadyImported(Box::new(tx_hash.clone())).into()) + Err(error::Error::AlreadyImported(Box::new(*tx_hash)).into()) } else { Ok(()) } @@ -209,7 +207,7 @@ impl ValidatedPool { if let base::Imported::Ready { ref hash, .. } = imported { self.import_notification_sinks.lock() .retain_mut(|sink| { - match sink.try_send(hash.clone()) { + match sink.try_send(*hash) { Ok(()) => true, Err(e) => { if e.is_full() { @@ -225,15 +223,15 @@ impl ValidatedPool { let mut listener = self.listener.write(); fire_events(&mut *listener, &imported); - Ok(imported.hash().clone()) + Ok(*imported.hash()) }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), std::iter::once(hash)); - Err(err.into()) + Err(err) }, ValidatedTransaction::Unknown(hash, err) => { self.listener.write().invalid(&hash, false); - Err(err.into()) + Err(err) }, } } @@ -258,9 +256,9 @@ impl ValidatedPool { let removed = { let mut pool = self.pool.write(); let removed = pool.enforce_limits(ready_limit, future_limit) - .into_iter().map(|x| x.hash.clone()).collect::>(); + .into_iter().map(|x| x.hash).collect::>(); // ban all removed transactions - self.rotator.ban(&Instant::now(), removed.iter().map(|x| x.clone())); + self.rotator.ban(&Instant::now(), removed.iter().copied()); removed }; if !removed.is_empty() { @@ -295,9 +293,9 @@ impl ValidatedPool { }, ValidatedTransaction::Invalid(hash, err) => { self.rotator.ban(&Instant::now(), 
std::iter::once(hash)); - Err(err.into()) + Err(err) }, - ValidatedTransaction::Unknown(_, err) => Err(err.into()), + ValidatedTransaction::Unknown(_, err) => Err(err), } } @@ -327,9 +325,9 @@ impl ValidatedPool { // note we are not considering tx with hash invalid here - we just want // to remove it along with dependent transactions and `remove_subtree()` // does exactly what we need - let removed = pool.remove_subtree(&[hash.clone()]); + let removed = pool.remove_subtree(&[hash]); for removed_tx in removed { - let removed_hash = removed_tx.hash.clone(); + let removed_hash = removed_tx.hash; let updated_transaction = updated_transactions.remove(&removed_hash); let tx_to_resubmit = if let Some(updated_tx) = updated_transaction { updated_tx @@ -343,7 +341,7 @@ impl ValidatedPool { ValidatedTransaction::Valid(transaction) }; - initial_statuses.insert(removed_hash.clone(), Status::Ready); + initial_statuses.insert(removed_hash, Status::Ready); txs_to_resubmit.push((removed_hash, tx_to_resubmit)); } // make sure to remove the hash even if it's not present in the pool any more. @@ -370,7 +368,7 @@ impl ValidatedPool { final_statuses.insert(hash, Status::Failed); } for tx in removed { - final_statuses.insert(tx.hash.clone(), Status::Dropped); + final_statuses.insert(tx.hash, Status::Dropped); } }, base::Imported::Future { .. 
} => { @@ -400,7 +398,7 @@ impl ValidatedPool { // queue, updating final statuses as required if reject_future_transactions { for future_tx in pool.clear_future() { - final_statuses.insert(future_tx.hash.clone(), Status::Dropped); + final_statuses.insert(future_tx.hash, Status::Dropped); } } @@ -428,7 +426,7 @@ impl ValidatedPool { self.pool.read().by_hashes(&hashes) .into_iter() .map(|existing_in_pool| existing_in_pool - .map(|transaction| transaction.provides.iter().cloned().collect())) + .map(|transaction| transaction.provides.to_vec())) .collect() } @@ -477,7 +475,7 @@ impl ValidatedPool { .into_iter() .enumerate() .filter_map(|(idx, r)| match r.map_err(error::IntoPoolError::into_pool_error) { - Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx].clone()), + Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx]), _ => None, }); // Fire `pruned` notifications for collected hashes and make sure to include @@ -498,7 +496,7 @@ impl ValidatedPool { hashes: impl Iterator>, ) -> Result<(), B::Error> { let header_hash = self.api.block_id_to_hash(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())?; + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))?; let mut listener = self.listener.write(); let mut set = HashSet::with_capacity(hashes.size_hint().0); for h in hashes { @@ -519,13 +517,13 @@ impl ValidatedPool { /// See `prune_tags` if you want this. pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { let block_number = self.api.block_id_to_number(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())? + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)))? 
.saturated_into::(); let now = Instant::now(); let to_remove = { self.ready() .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) - .map(|tx| tx.hash.clone()) + .map(|tx| tx.hash) .collect::>() }; let futures_to_remove: Vec> = { @@ -533,7 +531,7 @@ impl ValidatedPool { let mut hashes = Vec::new(); for tx in p.futures() { if self.rotator.ban_if_stale(&now, block_number, &tx) { - hashes.push(tx.hash.clone()); + hashes.push(tx.hash); } } hashes diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index b6f19ba376861..efd5a7a14342b 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -167,7 +167,7 @@ impl BasicPool let (revalidation_queue, background_task, notifier) = revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); ( - BasicPool { + Self { api: pool_api, pool, revalidation_queue: Arc::new(revalidation_queue), @@ -203,7 +203,7 @@ impl BasicPool spawner.spawn("txpool-background", background_task); } - BasicPool { + Self { api: pool_api, pool, revalidation_queue: Arc::new(revalidation_queue), diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index a41632ed8de88..063947b383d03 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -985,7 +985,7 @@ fn import_notification_to_pool_maintain_works() { let mut block_builder = client.new_block(Default::default()).unwrap(); block_builder.push(xt).unwrap(); let block = block_builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); + block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Get the notification of the block import and maintain the pool with it, // Now, the pool should not contain any transactions. 
diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md deleted file mode 100644 index bc4a15eb15f27..0000000000000 --- a/docs/Upgrading-2.0-to-3.0.md +++ /dev/null @@ -1,1120 +0,0 @@ -# Upgrading from Substrate 2.0 to 3.0 - -An incomplete guide. - -## Refreshing the node-template - -Not much has changed on the top and API level for developing Substrate betweeen 2.0 and 3.0. If you've made only small changes to the node-template, we recommend to do the following - it is easiest and quickest path forward: -1. take a diff between 2.0 and your changes -2. store that diff -3. remove everything, copy over the 3.0 node-template -4. try re-applying your diff, manually, a hunk at a time. - -## In-Depth guide on the changes - -If you've made significant changes or diverted from the node-template a lot, starting out with that is probably not helping. For that case, we'll take a look at all changes between 2.0 and 3.0 to the fully-implemented node and explain them one by one, so you can follow up, what needs to be changing for your node. - -_Note_: Of course, step 1 is to upgrade your `Cargo.toml`'s to use the latest version of Substrate and all dependencies. - -We'll be taking the diff from 2.0.1 to 3.0.0 on `bin/node` as the baseline of what has changed between these two versions in terms of adapting ones code base. We will not be covering the changes made on the tests and bench-marking as they are mostly reactions to the other changes. - -### Versions upgrade - -First and foremost you have to upgrade the version pf the dependencies of course, that's `0.8.x -> 0.9.0` and `2.0.x -> 3.0.0` for all `sc-`, `sp-`, `frame-`, and `pallet-` coming from Parity. Further more this release also upgraded its own dependencies, most notably, we are now using `parity-scale-codec 2.0`, `parking_lot 0.11` and `substrate-wasm-builder 3.0.0` (as build dependency). All other dependency upgrades should resolve automatically or are just internal. 
However you might see some error that another dependency/type you have as a dependency and one of our upgraded crates don't match up, if so please check the version of said dependency - we've probably ugraded it. - -### WASM-Builder - -The new version of wasm-builder has gotten a bit smarter and a lot faster (you should definitly switch). Once you've upgraded the dependency, in most cases you just have to remove the now obsolete `with_wasm_builder_from_crates_or_path`-function and you are good to go: - -```diff: rust ---- a/bin/node/runtime/build.rs -+++ b/bin/node/runtime/build.rs -@@ -15,12 +15,11 @@ - // See the License for the specific language governing permissions and - // limitations under the License. - --use wasm_builder_runner::WasmBuilder; -+use substrate_wasm_builder::WasmBuilder; - - fn main() { - WasmBuilder::new() - .with_current_project() -- .with_wasm_builder_from_crates_or_path("2.0.0", "../../../utils/wasm-builder") - .export_heap_base() - .import_memory() - .build() -``` - -### Runtime - -#### FRAME 2.0 - -The new FRAME 2.0 macros are a lot nicer to use and easier to read. While we were on that change though, we also cleaned up some mainly internal names and traits. The old `macro`'s still work and also produce the new structure, however, when plugging all that together as a Runtime, there's some things we have to adapt now: - -##### `::Trait for Runtime` becomes `::Config for Runtime` - -The most visible and significant change is that the macros no longer generate the `$pallet::Trait` but now a much more aptly named `$pallet::Config`. Thus, we need to rename all `::Trait for Runtime` into`::Config for Runtime`, e.g. for the `sudo` pallet we must do: - -```diff --impl pallet_sudo::Trait for Runtime { -+impl pallet_sudo::Config for Runtime { -``` - -The same goes for all `` and alike, which simply becomes ``. 
- -#### SS58 Prefix is now a runtime param - - -Since [#7810](https://github.com/paritytech/substrate/pull/7810) we don't define the ss58 prefix in the chainspec anymore but moved it into the runtime. Namely, `frame_system` now needs a new `SS58Prefix`, which in substrate node we have defined for ourselves as: `pub const SS58Prefix: u8 = 42;`. Use your own chain-specific value there. - -#### Weight Definition - -`type WeightInfo` has changed and instead on `weights::pallet_$name::WeightInfo` is now bound to the Runtime as `pallet_$name::weights::SubstrateWeight`. As a result we have to the change the type definitions everywhere in our Runtime accordingly: - -```diff -- type WeightInfo = weights::pallet_$name::WeightInfo; -+ type WeightInfo = pallet_$name::weights::SubstrateWeight; -``` - -e.g. -```diff -- type WeightInfo = weights::pallet_collective::WeightInfo; -+ type WeightInfo = pallet_collective::weights::SubstrateWeight; -``` -and - -```diff -- type WeightInfo = weights::pallet_proxy::WeightInfo; -+ type WeightInfo = pallet_proxy::weights::SubstrateWeight; -``` - -And update the overall definition for weights on frame and a few related types and runtime parameters: - -```diff= - --const AVERAGE_ON_INITIALIZE_WEIGHT: Perbill = Perbill::from_percent(10); -+/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. -+/// This is used to limit the maximal weight of a single extrinsic. -+const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); -+/// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used -+/// by Operational extrinsics. -+const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -+/// We allow for 2 seconds of compute with a 6 second average block time. -+const MAXIMUM_BLOCK_WEIGHT: Weight = 2 * WEIGHT_PER_SECOND; -+ - parameter_types! { - pub const BlockHashCount: BlockNumber = 2400; -- /// We allow for 2 seconds of compute with a 6 second average block time. 
-- pub const MaximumBlockWeight: Weight = 2 * WEIGHT_PER_SECOND; -- pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); -- /// Assume 10% of weight for average on_initialize calls. -- pub MaximumExtrinsicWeight: Weight = -- AvailableBlockRatio::get().saturating_sub(AVERAGE_ON_INITIALIZE_WEIGHT) -- * MaximumBlockWeight::get(); -- pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - pub const Version: RuntimeVersion = VERSION; --} -- --const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); -- --impl frame_system::Trait for Runtime { -+ pub RuntimeBlockLength: BlockLength = -+ BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); -+ pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() -+ .base_block(BlockExecutionWeight::get()) -+ .for_class(DispatchClass::all(), |weights| { -+ weights.base_extrinsic = ExtrinsicBaseWeight::get(); -+ }) -+ .for_class(DispatchClass::Normal, |weights| { -+ weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); -+ }) -+ .for_class(DispatchClass::Operational, |weights| { -+ weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); -+ // Operational transactions have some extra reserved space, so that they -+ // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. 
-+ weights.reserved = Some( -+ MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT -+ ); -+ }) -+ .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) -+ .build_or_panic(); -+} -+ -+const_assert!(NORMAL_DISPATCH_RATIO.deconstruct() >= AVERAGE_ON_INITIALIZE_RATIO.deconstruct()); -+ -+impl frame_system::Config for Runtime { - type BaseCallFilter = (); -+ type BlockWeights = RuntimeBlockWeights; -+ type BlockLength = RuntimeBlockLength; -+ type DbWeight = RocksDbWeight; - type Origin = Origin; - type Call = Call; - type Index = Index; -@@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime { - type Header = generic::Header; - type Event = Event; - type BlockHashCount = BlockHashCount; -- type MaximumBlockWeight = MaximumBlockWeight; -- type DbWeight = RocksDbWeight; -- type BlockExecutionWeight = BlockExecutionWeight; -- type ExtrinsicBaseWeight = ExtrinsicBaseWeight; -- type MaximumExtrinsicWeight = MaximumExtrinsicWeight; -- type MaximumBlockLength = MaximumBlockLength; -- type AvailableBlockRatio = AvailableBlockRatio; - type Version = Version; - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); -- type SystemWeightInfo = weights::frame_system::WeightInfo; -+ type SystemWeightInfo = frame_system::weights::SubstrateWeight; -``` - -#### Pallets: - -##### Assets - -The assets pallet has seen a variety of changes: -- [Features needed for reserve-backed stablecoins #7152 ](https://github.com/paritytech/substrate/pull/7152) -- [Freeze Assets and Asset Metadata #7346 ](https://github.com/paritytech/substrate/pull/7346) -- [Introduces account existence providers reference counting #7363 ](https://github.com/paritytech/substrate/pull/7363) - -have all altered the feature set and changed the concepts. However, it has some of the best documentation and explains the current state very well. 
If you are using the assets pallet and need to upgrade from an earlier version, we recommend you use the current docs to guide your way! - -##### Contracts - -As noted in the changelog, the `contracts`-pallet is still undergoing massive changes and is not yet part of this release. We are expecting it to be released a few weeks after. If your chain is dependent on this pallet, we recommend waiting until it has been released as the currently released version is not compatible with FRAME 2.0. - -#### (changes) Treasury - -As mentioned above, Bounties, Tips and Lottery have been extracted out of treasury into their own pallets - removing these options here. Secondly we must now specify the `BurnDestination` and `SpendFunds`, which now go to the `Bounties`. - -```diff -- type Tippers = Elections; -- type TipCountdown = TipCountdown; -- type TipFindersFee = TipFindersFee; -- type TipReportDepositBase = TipReportDepositBase; -- type DataDepositPerByte = DataDepositPerByte; - type Event = Event; - type OnSlash = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type SpendPeriod = SpendPeriod; - type Burn = Burn; -+ type BurnDestination = (); -+ type SpendFunds = Bounties; -``` - -Factoring out Bounties and Tips means most of these definitions have now moved there, while the parameter types can be left as they were: - -###### 🆕 Bounties - -```rust= -impl pallet_bounties::Config for Runtime { - type Event = Event; - type BountyDepositBase = BountyDepositBase; - type BountyDepositPayoutDelay = BountyDepositPayoutDelay; - type BountyUpdatePeriod = BountyUpdatePeriod; - type BountyCuratorDeposit = BountyCuratorDeposit; - type BountyValueMinimum = BountyValueMinimum; - type DataDepositPerByte = DataDepositPerByte; - type MaximumReasonLength = MaximumReasonLength; - type WeightInfo = pallet_bounties::weights::SubstrateWeight; - } -``` - -###### 🆕 Tips - -```rust= -impl pallet_tips::Config for Runtime { - type Event = Event; - type 
DataDepositPerByte = DataDepositPerByte; - type MaximumReasonLength = MaximumReasonLength; - type Tippers = Elections; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type WeightInfo = pallet_tips::weights::SubstrateWeight; - } -``` - -#### `FinalityTracker` removed - -Finality Tracker has been removed in favor of a different approach to handle the issue in GRANDPA, [see #7228 for details](https://github.com/paritytech/substrate/pull/7228). With latest GRANDPA this is not needed anymore and can be removed without worry. - -#### (changes) Elections Phragmen - -The pallet has been moved to a new system in which the exact amount of deposit for each voter, candidate, member, or runner-up is now deposited on-chain. Moreover, the concept of a `defunct_voter` is removed, since votes now have adequate deposit associated with them. A number of configuration parameters have changed to reflect this, as shown below: - -```diff= - parameter_types! { - pub const CandidacyBond: Balance = 10 * DOLLARS; -- pub const VotingBond: Balance = 1 * DOLLARS; -+ // 1 storage item created, key size is 32 bytes, value size is 16+16. -+ pub const VotingBondBase: Balance = deposit(1, 64); -+ // additional data per vote is 32 bytes (account id). -+ pub const VotingBondFactor: Balance = deposit(0, 32); - pub const TermDuration: BlockNumber = 7 * DAYS; - pub const DesiredMembers: u32 = 13; - pub const DesiredRunnersUp: u32 = 7; - -@@ -559,16 +600,16 @@ impl pallet_elections_phragmen::Trait for Runtime { - // NOTE: this implies that council's genesis members cannot be set directly and must come from - // this module. 
- type InitializeMembers = Council; -- type CurrencyToVote = CurrencyToVoteHandler; -+ type CurrencyToVote = U128CurrencyToVote; - type CandidacyBond = CandidacyBond; -- type VotingBond = VotingBond; -+ type VotingBondBase = VotingBondBase; -+ type VotingBondFactor = VotingBondFactor; - type LoserCandidate = (); -- type BadReport = (); - type KickedMember = (); - type DesiredMembers = DesiredMembers; - type DesiredRunnersUp = DesiredRunnersUp; - type TermDuration = TermDuration; - ``` - - **This upgrade requires storage [migration](https://github.com/paritytech/substrate/blob/master/frame/elections-phragmen/src/migrations_3_0_0.rs)**. Further details can be found in the [pallet-specific changelog](https://github.com/paritytech/substrate/blob/master/frame/elections-phragmen/CHANGELOG.md#security). - -#### (changes) Democracy - -Democracy brings three new settings with this release, all to allow for better influx- and spam-control. Namely these allow to specify the maximum number of proposals at a time, who can blacklist and who can cancel proposals. This diff acts as a good starting point: - -```diff= -@@ -508,6 +537,14 @@ impl pallet_democracy::Trait for Runtime { - type FastTrackVotingPeriod = FastTrackVotingPeriod; - // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; -+ // To cancel a proposal before it has been passed, the technical committee must be unanimous or -+ // Root must agree. -+ type CancelProposalOrigin = EnsureOneOf< -+ AccountId, -+ EnsureRoot, -+ pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>, -+ >; -+ type BlacklistOrigin = EnsureRoot; - // Any single technical committee member may veto a coming council proposal, however they can - // only do it once and it lasts only for the cooloff period. 
- type VetoOrigin = pallet_collective::EnsureMember; -@@ -518,7 +555,8 @@ impl pallet_democracy::Trait for Runtime { - type Scheduler = Scheduler; - type PalletsOrigin = OriginCaller; - type MaxVotes = MaxVotes; -+ type MaxProposals = MaxProposals; - } -``` - ----- - -### Primitives - -The shared primitives define the API between Client and Runtime. Usually, you don't have to touch nor directly interact with them, unless you created your own client or frame-less runtime. Therefore we'd expect you to understand whether you are affected by changes and how to update your code yourself. - ----- - -### Client - -#### CLI - -A few minor things have changed in the `cli` (compared to 2.0.1): - -1. we've [replaced the newly added `BuildSyncSpec` subcommand with an RPC API](https://github.com/paritytech/substrate/commit/65cc9af9b8df8d36928f6144ee7474cefbd70454#diff-c57da6fbeff8c46ce15f55ea42fedaa5a4684d79578006ce4af01ae04fd6b8f8) in an on-going effort to make light-client-support smoother, see below -2. we've [removed double accounts from our chainspec-builder](https://github.com/paritytech/substrate/commit/31499cd29ed30df932fb71b7459796f7160d0272) -3. we [don't fall back to `--chain flaming-fir` anymore](https://github.com/paritytech/substrate/commit/13cdf1c8cd2ee62d411f82b64dc7eba860c9c6c6), if no chain is given our substrate-node will error. -4. [the `subkey`-integration has seen a fix to the `insert`-command](https://github.com/paritytech/substrate/commit/54bde60cfd2c544c54e9e8623b6b8725b99557f8) that requires you to now add the `&cli` as a param. 
- ```diff= - --- a/bin/node/cli/src/command.rs - +++ b/bin/node/cli/src/command.rs - @@ -92,7 +97,7 @@ pub fn run() -> Result<()> { - You can enable it with `--features runtime-benchmarks`.".into()) - } - } - - Some(Subcommand::Key(cmd)) => cmd.run(), - + Some(Subcommand::Key(cmd)) => cmd.run(&cli), - Some(Subcommand::Sign(cmd)) => cmd.run(), - Some(Subcommand::Verify(cmd)) => cmd.run(), - Some(Subcommand::Vanity(cmd)) => cmd.run(), - ``` - - -#### Service Builder Upgrades - -##### Light client support - -As said, we've added a new optional RPC service for improved light client support. For that to work, we need to pass the `chain_spec` and give access to the `AuxStore` to our `rpc`: - - -```diff= - ---- a/bin/node/rpc/src/lib.rs -+++ b/bin/node/rpc/src/lib.rs -@@ -49,6 +49,7 @@ use sp_consensus::SelectChain; - use sp_consensus_babe::BabeApi; - use sc_rpc::SubscriptionTaskExecutor; - use sp_transaction_pool::TransactionPool; -+use sc_client_api::AuxStore; - - /// Light client extra dependencies. - pub struct LightDeps { -@@ -94,6 +95,8 @@ pub struct FullDeps { - pub pool: Arc

::storage_version(); + log::info!( + target: "runtime::elections-phragmen", + "Running migration to v4 for elections-phragmen with storage version {:?}", + maybe_storage_version, + ); + + match maybe_storage_version { + Some(storage_version) if storage_version <= PalletVersion::new(3, 0, 0) => { + log::info!("new prefix: {}", new_pallet_name.as_ref()); + frame_support::storage::migration::move_pallet( + OLD_PREFIX, + new_pallet_name.as_ref().as_bytes(), + ); + ::BlockWeights::get().max_block + } + _ => { + log::warn!( + target: "runtime::elections-phragmen", + "Attempted to apply migration to v4 but failed because storage version is {:?}", + maybe_storage_version, + ); + 0 + }, + } +} + +/// Some checks prior to migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::pre_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn pre_migration>(new: N) { + let new = new.as_ref(); + log::info!("pre-migration elections-phragmen test with new = {}", new); + + // the next key must exist, and start with the hash of `OLD_PREFIX`. + let next_key = sp_io::storage::next_key(OLD_PREFIX).unwrap(); + assert!(next_key.starts_with(&sp_io::hashing::twox_128(OLD_PREFIX))); + + // ensure nothing is stored in the new prefix. + assert!( + sp_io::storage::next_key(new.as_bytes()).map_or( + // either nothing is there + true, + // or we ensure that it has no common prefix with twox_128(new). + |next_key| !next_key.starts_with(&sp_io::hashing::twox_128(new.as_bytes())) + ), + "unexpected next_key({}) = {:?}", + new, + sp_core::hexdisplay::HexDisplay::from(&sp_io::storage::next_key(new.as_bytes()).unwrap()) + ); + // ensure storage version is 3. + assert!(

::storage_version().unwrap().major == 3); +} + +/// Some checks for after migration. This can be linked to +/// [`frame_support::traits::OnRuntimeUpgrade::post_upgrade`] for further testing. +/// +/// Panics if anything goes wrong. +pub fn post_migration

() { + log::info!("post-migration elections-phragmen"); + // ensure we've been updated to v4 by the automatic write of crate version -> storage version. + assert!(

::storage_version().unwrap().major == 4); +} diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index ac3c709300f51..d4b84f5bb1565 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -32,7 +31,6 @@ std = [ "codec/std", "sp-core/std", "sp-std/std", - "serde", "sp-io/std", "frame-support/std", "sp-runtime/std", diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index d6b68bbf5a043..46ec62bf75174 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -156,7 +156,7 @@ pub trait Config: frame_system::Config { type Event: From> + Into<::Event>; /// Identifier for the elections pallet's lock - type ModuleId: Get; + type PalletId: Get; /// The currency that people are electing with. type Currency: @@ -391,7 +391,7 @@ decl_module! { /// The chunk size of the approval vector. const APPROVAL_SET_SIZE: u32 = APPROVAL_SET_SIZE as u32; - const ModuleId: LockIdentifier = T::ModuleId::get(); + const PalletId: LockIdentifier = T::PalletId::get(); fn deposit_event() = default; @@ -491,7 +491,7 @@ decl_module! { ); T::Currency::remove_lock( - T::ModuleId::get(), + T::PalletId::get(), if valid { &who } else { &reporter } ); @@ -529,7 +529,7 @@ decl_module! { Self::remove_voter(&who, index); T::Currency::unreserve(&who, T::VotingBond::get()); - T::Currency::remove_lock(T::ModuleId::get(), &who); + T::Currency::remove_lock(T::PalletId::get(), &who); } /// Submit oneself for candidacy. 
@@ -890,7 +890,7 @@ impl Module { } T::Currency::set_lock( - T::ModuleId::get(), + T::PalletId::get(), &who, locked_balance, WithdrawReasons::all(), diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index 287eaa27b196a..896fd40020e41 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -58,6 +58,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -102,7 +103,7 @@ impl ChangeMembers for TestChangeMembers { } parameter_types!{ - pub const ElectionModuleId: LockIdentifier = *b"py/elect"; + pub const ElectionPalletId: LockIdentifier = *b"py/elect"; } impl elections::Config for Test { @@ -122,7 +123,7 @@ impl elections::Config for Test { type InactiveGracePeriod = InactiveGracePeriod; type VotingPeriod = VotingPeriod; type DecayRatio = DecayRatio; - type ModuleId = ElectionModuleId; + type PalletId = ElectionPalletId; } pub type Block = sp_runtime::generic::Block; diff --git a/frame/erc20/src/mock.rs b/frame/erc20/src/mock.rs index 3e811b8352067..e35085a5c65fe 100644 --- a/frame/erc20/src/mock.rs +++ b/frame/erc20/src/mock.rs @@ -9,7 +9,7 @@ use sp_core::H256; use sp_runtime::{ testing::Header, traits::{AccountIdConversion, BlakeTwo256, Block as BlockT, IdentityLookup}, - ModuleId, Perbill, + Perbill, }; use crate::{self as example, Trait}; @@ -124,7 +124,7 @@ pub const RELAYER_C: u64 = 0x4; pub const ENDOWED_BALANCE: u64 = 100_000_000; pub fn new_test_ext() -> sp_io::TestExternalities { - let bridge_id = ModuleId(*b"cb/bridg").into_account(); + let bridge_id = PalletId(*b"cb/bridg").into_account(); let mut t = frame_system::GenesisConfig::default() .build_storage::() .unwrap(); diff --git a/frame/erc721/src/mock.rs b/frame/erc721/src/mock.rs index a07fd1a14ebc9..d23ce61f37fb6 100644 --- a/frame/erc721/src/mock.rs +++ b/frame/erc721/src/mock.rs @@ -44,6 +44,7 @@ impl frame_system::Config for Test { type 
OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { diff --git a/frame/example-parallel/src/tests.rs b/frame/example-parallel/src/tests.rs index e82d75e632068..56cb73ebb08bb 100644 --- a/frame/example-parallel/src/tests.rs +++ b/frame/example-parallel/src/tests.rs @@ -66,6 +66,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index de741294b9c16..258648b52e5b7 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example" -version = "2.0.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -21,8 +20,8 @@ pallet-balances = { version = "3.0.0", default-features = false, path = "../bala sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } - frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } @@ -30,14 +29,14 @@ sp-core = { version = "3.0.0", path = "../../primitives/core", default-features [features] 
default = ["std"] std = [ - "serde", "codec/std", - "sp-runtime/std", "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "sp-io/std", + "sp-runtime/std", "sp-std/std" ] runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/example/src/benchmarking.rs b/frame/example/src/benchmarking.rs new file mode 100644 index 0000000000000..64602ca41cee9 --- /dev/null +++ b/frame/example/src/benchmarking.rs @@ -0,0 +1,76 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarking for pallet-example. + +#![cfg(feature = "runtime-benchmarks")] + +use crate::*; +use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; +use frame_system::RawOrigin; + +// To actually run this benchmark on pallet-example, we need to put this pallet into the +// runtime and compile it with `runtime-benchmarks` feature. The detail procedures are +// documented at: +// https://substrate.dev/docs/en/knowledgebase/runtime/benchmarking#how-to-benchmark +// +// The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. +// The exact command of how the estimate generated is printed at the top of the file. 
+ +// Details on using the benchmarks macro can be seen at: +// https://substrate.dev/rustdocs/v3.0.0/frame_benchmarking/macro.benchmarks.html +benchmarks!{ + // This will measure the execution time of `set_dummy` for b in [1..1000] range. + set_dummy_benchmark { + // This is the benchmark setup phase + let b in 1 .. 1000; + }: set_dummy(RawOrigin::Root, b.into()) // The execution phase is just running `set_dummy` extrinsic call + verify { + // This is the optional benchmark verification phase, asserting certain states. + assert_eq!(Pallet::::dummy(), Some(b.into())) + } + + // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. + // The benchmark execution phase is shorthanded. When the name of the benchmark case is the same + // as the extrinsic call. `_(...)` is used to represent the extrinsic name. + // The benchmark verification phase is omitted. + accumulate_dummy { + let b in 1 .. 1000; + // The caller account is whitelisted for DB reads/write by the benchmarking macro. + let caller: T::AccountId = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of sorting a vector. + sort_vector { + let x in 0 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + // The benchmark execution phase could also be a closure with custom code + m.sort(); + } +} + +// This line generates test cases for benchmarking, and could be run by: +// `cargo test -p pallet-example --all-features`, you will see an additional line of: +// `test benchmarking::benchmark_tests::test_benchmarks ... ok` in the result. +// +// The line generates three steps per benchmark, with repeat=1 and the three steps are +// [low, mid, high] of the range. 
+impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 86e9b7fdc0c18..fd1bc292ac8aa 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -255,28 +255,45 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::marker::PhantomData; +use sp_std::{ + prelude::*, + marker::PhantomData +}; use frame_support::{ dispatch::DispatchResult, traits::IsSubType, weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays}, }; -use sp_std::prelude::*; use frame_system::{ensure_signed}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{ - SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, + SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, Saturating }, transaction_validity::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, }, }; +use log::info; + +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; + +#[cfg(test)] +mod tests; + +mod benchmarking; +pub mod weights; +pub use weights::*; + +/// A type alias for the balance type from this pallet's point of view. +type BalanceOf = ::Balance; +const MILLICENTS: u32 = 1_000_000_000; // A custom weight calculator tailored for the dispatch call `set_dummy()`. This actually examines // the arguments and makes a decision based upon them. // // The `WeightData` trait has access to the arguments of the dispatch that it wants to assign a -// weight to. Nonetheless, the trait itself can not make any assumptions about what the generic type +// weight to. Nonetheless, the trait itself cannot make any assumptions about what the generic type // of the arguments (`T`) is. Based on our needs, we could replace `T` with a more concrete type // while implementing the trait. 
The `pallet::weight` expects whatever implements `WeighData` to // replace `T` with a tuple of the dispatch arguments. This is exactly how we will craft the @@ -286,13 +303,22 @@ use sp_runtime::{ // - The final weight of each dispatch is calculated as the argument of the call multiplied by the // parameter given to the `WeightForSetDummy`'s constructor. // - assigns a dispatch class `operational` if the argument of the call is more than 1000. +// +// More information can be read at: +// - https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight +// - https://substrate.dev/docs/en/knowledgebase/runtime/fees#default-weight-annotations +// +// Manually configuring weight is an advanced operation and what you really need may well be +// fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. struct WeightForSetDummy(BalanceOf); impl WeighData<(&BalanceOf,)> for WeightForSetDummy { fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { let multiplier = self.0; - (*target.0 * multiplier).saturated_into::() + // *target.0 is the amount passed into the extrinsic + let cents = *target.0 / >::from(MILLICENTS); + (cents * multiplier).saturated_into::() } } @@ -312,12 +338,6 @@ impl PaysFee<(&BalanceOf,)> for WeightForSetDummy } } -/// A type alias for the balance type from this pallet's point of view. -type BalanceOf = ::Balance; - -// Re-export pallet items so that they can be accessed from the crate namespace. -pub use pallet::*; - // Definition of the pallet logic, to be aggregated at runtime definition through // `construct_runtime`. #[frame_support::pallet] @@ -334,8 +354,15 @@ pub mod pallet { /// `frame_system::Config` should always be included. #[pallet::config] pub trait Config: pallet_balances::Config + frame_system::Config { + // Setting a constant config parameter from the runtime + #[pallet::constant] + type MagicNumber: Get; + /// The overarching event type. 
type Event: From> + IsType<::Event>; + + /// Type representing the weight of this pallet + type WeightInfo: WeightInfo; } // Simple declaration of the `Pallet` type. It is placeholder we use to implement traits and @@ -354,14 +381,12 @@ pub mod pallet { fn on_initialize(_n: T::BlockNumber) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. - 0 } // `on_finalize` is executed at the end of block after all extrinsic are dispatched. fn on_finalize(_n: T::BlockNumber) { - // We just kill our dummy storage item. - >::kill(); + // Perform necessary data/state clean up here. } // A runtime code run after every block and have access to extended set of APIs. @@ -370,7 +395,9 @@ pub mod pallet { fn offchain_worker(_n: T::BlockNumber) { // We don't do anything here. // but we could dispatch extrinsic (transaction/unsigned/inherent) using - // sp_io::submit_extrinsic + // sp_io::submit_extrinsic. + // To see example on offchain worker, please refer to example-offchain-worker pallet + // accompanied in this repository. } } @@ -455,11 +482,16 @@ pub mod pallet { // difficulty) of the transaction and the latter demonstrates the [`DispatchClass`] of the // call. A higher weight means a larger transaction (less of which can be placed in a // single block). - #[pallet::weight(0)] + // + // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the benchmark + // toolchain. + #[pallet::weight( + ::WeightInfo::accumulate_dummy((*increase_by).saturated_into()) + )] pub(super) fn accumulate_dummy( origin: OriginFor, increase_by: T::Balance - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { // This is a public call, so we ensure that the origin is some signed account. let _sender = ensure_signed(origin)?; @@ -478,15 +510,16 @@ pub mod pallet { // Here's the new one of read and then modify the value. 
>::mutate(|dummy| { - let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + // Using `saturating_add` instead of a regular `+` to avoid overflowing + let new_dummy = dummy.map_or(increase_by, |d| d.saturating_add(increase_by)); *dummy = Some(new_dummy); }); // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(Event::Dummy(increase_by)); + Self::deposit_event(Event::AccumulateDummy(increase_by)); // All good, no refund. - Ok(().into()) + Ok(()) } /// A privileged call; in this case it resets our dummy value to something new. @@ -496,17 +529,28 @@ pub mod pallet { // calls to be executed - we don't need to care why. Because it's privileged, we can // assume it's a one-off operation and substantial processing/storage/memory can be used // without worrying about gameability or attack scenarios. + // + // The weight for this extrinsic we use our own weight object `WeightForSetDummy` to determine + // its weight #[pallet::weight(WeightForSetDummy::(>::from(100u32)))] - fn set_dummy( + pub(super) fn set_dummy( origin: OriginFor, #[pallet::compact] new_value: T::Balance, - ) -> DispatchResultWithPostInfo { + ) -> DispatchResult { ensure_root(origin)?; + + // Print out log or debug message in the console via log::{error, warn, info, debug, trace}, + // accepting format strings similar to `println!`. + // https://substrate.dev/rustdocs/v3.0.0/log/index.html + info!("New value is now: {:?}", new_value); + // Put the new value into storage. >::put(new_value); + Self::deposit_event(Event::SetDummy(new_value)); + // All good, no refund. - Ok(().into()) + Ok(()) } } @@ -520,7 +564,9 @@ pub mod pallet { pub enum Event { // Just a normal `enum`, here's a dummy event to ensure it compiles. /// Dummy event, just here so there's a generic type that's used. 
- Dummy(BalanceOf), + AccumulateDummy(BalanceOf), + SetDummy(BalanceOf), + SetBar(T::AccountId, BalanceOf), } // pallet::storage attributes allow for type-safe usage of the Substrate storage database, @@ -545,14 +591,13 @@ pub mod pallet { // A map that has enumerable entries. #[pallet::storage] #[pallet::getter(fn bar)] - pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance>; // this one uses the query kind: `ValueQuery`, we'll demonstrate the usage of 'mutate' API. #[pallet::storage] #[pallet::getter(fn foo)] pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; - // The genesis config type. #[pallet::genesis_config] pub struct GenesisConfig { @@ -600,7 +645,7 @@ impl Pallet { let prev = >::get(); // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. let result = >::mutate(|foo| { - *foo = *foo + increase_by; + *foo = foo.saturating_add(increase_by); *foo }); assert!(prev + increase_by == result); @@ -640,11 +685,11 @@ impl Pallet { // types defined in the runtime. Lookup `pub type SignedExtra = (...)` in `node/runtime` and // `node-template` for an example of this. -/// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the -/// priority and prints some log. -/// -/// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No -/// particular reason why, just to demonstrate the power of signed extensions. +// A simple signed extension that checks for the `set_dummy` call. In that case, it increases the +// priority and prints some log. +// +// Additionally, it drops any transaction with an encoded length higher than 200 bytes. No +// particular reason why, just to demonstrate the power of signed extensions. 
#[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct WatchDummy(PhantomData); @@ -691,201 +736,3 @@ where } } } - -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking { - use super::*; - use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; - use frame_system::RawOrigin; - - benchmarks!{ - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. - accumulate_dummy { - let b in 1 .. 1000; - let caller = account("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..1000] range. - set_dummy { - let b in 1 .. 1000; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..10] range. - another_set_dummy { - let b in 1 .. 10; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of sorting a vector. - sort_vector { - let x in 0 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); - } - }: { - m.sort(); - } - } - - impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); -} - -#[cfg(test)] -mod tests { - use super::*; - - use frame_support::{ - assert_ok, parameter_types, - weights::{DispatchInfo, GetDispatchInfo}, traits::{OnInitialize, OnFinalize} - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - testing::Header, BuildStorage, - traits::{BlakeTwo256, IdentityLookup}, - }; - // Reexport crate as its pallet name for construct_runtime. - use crate as pallet_example; - - type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; - type Block = frame_system::mocking::MockBlock; - - // For testing the pallet, we construct a mock runtime. 
- frame_support::construct_runtime!( - pub enum Test where - Block = Block, - NodeBlock = Block, - UncheckedExtrinsic = UncheckedExtrinsic, - { - System: frame_system::{Pallet, Call, Config, Storage, Event}, - Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Example: pallet_example::{Pallet, Call, Storage, Config, Event}, - } - ); - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub BlockWeights: frame_system::limits::BlockWeights = - frame_system::limits::BlockWeights::simple_max(1024); - } - impl frame_system::Config for Test { - type BaseCallFilter = (); - type BlockWeights = (); - type BlockLength = (); - type DbWeight = (); - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = Call; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type Version = (); - type PalletInfo = PalletInfo; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - type SystemWeightInfo = (); - type SS58Prefix = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Config for Test { - type MaxLocks = (); - type Balance = u64; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - type WeightInfo = (); - } - impl Config for Test { - type Event = Event; - } - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - pub fn new_test_ext() -> sp_io::TestExternalities { - let t = GenesisConfig { - // We use default for brevity, but you can configure as desired if needed. - frame_system: Default::default(), - pallet_balances: Default::default(), - pallet_example: pallet_example::GenesisConfig { - dummy: 42, - // we configure the map with (key, value) pairs. 
- bar: vec![(1, 2), (2, 3)], - foo: 24, - }, - }.build_storage().unwrap(); - t.into() - } - - #[test] - fn it_works_for_optional_value() { - new_test_ext().execute_with(|| { - // Check that GenesisBuilder works properly. - assert_eq!(Example::dummy(), Some(42)); - - // Check that accumulate works when we have Some value in Dummy already. - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); - assert_eq!(Example::dummy(), Some(69)); - - // Check that finalizing the block removes Dummy from storage. - >::on_finalize(1); - assert_eq!(Example::dummy(), None); - - // Check that accumulate works when we Dummy has None in it. - >::on_initialize(2); - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); - assert_eq!(Example::dummy(), Some(42)); - }); - } - - #[test] - fn it_works_for_default_value() { - new_test_ext().execute_with(|| { - assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); - assert_eq!(Example::foo(), 25); - }); - } - - #[test] - fn signed_ext_watch_dummy_works() { - new_test_ext().execute_with(|| { - let call = >::set_dummy(10).into(); - let info = DispatchInfo::default(); - - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 150) - .unwrap() - .priority, - u64::max_value(), - ); - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), - InvalidTransaction::ExhaustsResources.into(), - ); - }) - } - - #[test] - fn weights_work() { - // must have a defined weight. - let default_call = >::accumulate_dummy(10); - let info = default_call.get_dispatch_info(); - // aka. 
`let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert_eq!(info.weight, 0); - - // must have a custom weight of `100 * arg = 2000` - let custom_call = >::set_dummy(20); - let info = custom_call.get_dispatch_info(); - assert_eq!(info.weight, 2000); - } -} diff --git a/frame/example/src/tests.rs b/frame/example/src/tests.rs new file mode 100644 index 0000000000000..496cd5701fe58 --- /dev/null +++ b/frame/example/src/tests.rs @@ -0,0 +1,190 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for pallet-example. + +use crate::*; +use frame_support::{ + assert_ok, parameter_types, + weights::{DispatchInfo, GetDispatchInfo}, traits::OnInitialize +}; +use sp_core::H256; +// The testing primitives are very useful for avoiding having to work with signatures +// or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use sp_runtime::{ + testing::Header, BuildStorage, + traits::{BlakeTwo256, IdentityLookup}, +}; +// Reexport crate as its pallet name for construct_runtime. +use crate as pallet_example; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// For testing the pallet, we construct a mock runtime. 
+frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, + Example: pallet_example::{Pallet, Call, Storage, Config, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! { + pub const MagicNumber: u64 = 1_000_000_000; +} +impl Config for Test { + type MagicNumber = MagicNumber; + type Event = Event; + type WeightInfo = (); +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = GenesisConfig { + // We use default for brevity, but you can configure as desired if needed. 
+ frame_system: Default::default(), + pallet_balances: Default::default(), + pallet_example: pallet_example::GenesisConfig { + dummy: 42, + // we configure the map with (key, value) pairs. + bar: vec![(1, 2), (2, 3)], + foo: 24, + }, + }.build_storage().unwrap(); + t.into() +} + +#[test] +fn it_works_for_optional_value() { + new_test_ext().execute_with(|| { + // Check that GenesisBuilder works properly. + let val1 = 42; + let val2 = 27; + assert_eq!(Example::dummy(), Some(val1)); + + // Check that accumulate works when we have Some value in Dummy already. + assert_ok!(Example::accumulate_dummy(Origin::signed(1), val2)); + assert_eq!(Example::dummy(), Some(val1 + val2)); + + // Check that accumulate works when we Dummy has None in it. + >::on_initialize(2); + assert_ok!(Example::accumulate_dummy(Origin::signed(1), val1)); + assert_eq!(Example::dummy(), Some(val1 + val2 + val1)); + }); +} + +#[test] +fn it_works_for_default_value() { + new_test_ext().execute_with(|| { + assert_eq!(Example::foo(), 24); + assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); + assert_eq!(Example::foo(), 25); + }); +} + +#[test] +fn set_dummy_works() { + new_test_ext().execute_with(|| { + let test_val = 133; + assert_ok!(Example::set_dummy(Origin::root(), test_val.into())); + assert_eq!(Example::dummy(), Some(test_val)); + }); +} + +#[test] +fn signed_ext_watch_dummy_works() { + new_test_ext().execute_with(|| { + let call = >::set_dummy(10).into(); + let info = DispatchInfo::default(); + + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, &info, 150) + .unwrap() + .priority, + u64::max_value(), + ); + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, &info, 250), + InvalidTransaction::ExhaustsResources.into(), + ); + }) +} + +#[test] +fn weights_work() { + // must have a defined weight. + let default_call = >::accumulate_dummy(10); + let info1 = default_call.get_dispatch_info(); + // aka. 
`let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` + assert!(info1.weight > 0); + + + // `set_dummy` is simpler than `accumulate_dummy`, and the weight + // should be less. + let custom_call = >::set_dummy(20); + let info2 = custom_call.get_dispatch_info(); + assert!(info1.weight > info2.weight); +} diff --git a/frame/example/src/weights.rs b/frame/example/src/weights.rs new file mode 100644 index 0000000000000..db6491335c76f --- /dev/null +++ b/frame/example/src/weights.rs @@ -0,0 +1,100 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_example +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-03-15, STEPS: `[100, ]`, REPEAT: 10, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// ./target/release/substrate +// benchmark +// --chain +// dev +// --execution +// wasm +// --wasm-execution +// compiled +// --pallet +// pallet_example +// --extrinsic +// * +// --steps +// 100 +// --repeat +// 10 +// --raw +// --output +// ./ +// --template +// ./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_example. +pub trait WeightInfo { + fn set_dummy_benchmark(b: u32, ) -> Weight; + fn accumulate_dummy(b: u32, ) -> Weight; + fn sort_vector(x: u32, ) -> Weight; +} + +/// Weights for pallet_example using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn set_dummy_benchmark(b: u32, ) -> Weight { + (5_834_000 as Weight) + .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn accumulate_dummy(b: u32, ) -> Weight { + (51_353_000 as Weight) + .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn sort_vector(x: u32, ) -> Weight { + (2_569_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn set_dummy_benchmark(b: u32, ) -> Weight { + (5_834_000 as Weight) + .saturating_add((24_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn accumulate_dummy(b: u32, ) -> Weight { + (51_353_000 as Weight) + .saturating_add((14_000 as Weight).saturating_mul(b as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn sort_vector(x: u32, ) -> Weight { + (2_569_000 as Weight) + // Standard Error: 0 + .saturating_add((4_000 as Weight).saturating_mul(x as Weight)) + } +} diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 6a0042308736e..a923f926a0960 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -serde = { version = "1.0.101", optional = true } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-tracing = { version = "3.0.0", default-features = false, path = "../../primitives/tracing" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -31,6 +30,7 @@ pallet-indices = { version = "3.0.0", path = "../indices" } pallet-balances = { version = "3.0.0", path = "../balances" } pallet-transaction-payment = { version = "3.0.0", path = "../transaction-payment" } sp-version = { version = "3.0.0", path = "../../primitives/version" } +sp-inherents = { version = "3.0.0", path = "../../primitives/inherents" } [features] default = ["std"] @@ -41,7 +41,6 @@ std = [ "codec/std", "frame-support/std", "frame-system/std", - "serde", "sp-core/std", "sp-runtime/std", "sp-tracing/std", diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 277b20cf20bfa..bc2783f76b5df 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -119,7 +119,10 @@ use sp_std::{prelude::*, marker::PhantomData}; use frame_support::{ weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, - 
traits::{OnInitialize, OnIdle, OnFinalize, OnRuntimeUpgrade, OffchainWorker, ExecuteBlock}, + traits::{ + OnInitialize, OnIdle, OnFinalize, OnRuntimeUpgrade, OffchainWorker, ExecuteBlock, + EnsureInherentsAreFirst, + }, dispatch::PostDispatchInfo, }; use sp_runtime::{ @@ -153,7 +156,7 @@ pub struct Executive, Block: traits::Block, Context: Default, UnsignedValidator, @@ -181,7 +184,7 @@ where } impl< - System: frame_system::Config, + System: frame_system::Config + EnsureInherentsAreFirst, Block: traits::Block

, Context: Default, UnsignedValidator, @@ -311,6 +314,10 @@ where && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), "Parent hash should be valid.", ); + + if let Err(i) = System::ensure_inherents_are_first(block) { + panic!("Invalid inherent position for extrinsic at index {}", i); + } } /// Actually execute all transitions for `block`. @@ -528,7 +535,7 @@ mod tests { }, }; use frame_support::{ - parameter_types, + assert_err, parameter_types, weights::{Weight, RuntimeDbWeight, IdentityFee, WeightToFeePolynomial}, traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, }; @@ -543,7 +550,7 @@ mod tests { mod custom { use frame_support::weights::{Weight, DispatchClass}; use sp_runtime::transaction_validity::{ - UnknownTransaction, TransactionSource, TransactionValidity + UnknownTransaction, TransactionSource, TransactionValidity, TransactionValidityError, }; pub trait Config: frame_system::Config {} @@ -553,25 +560,30 @@ mod tests { #[weight = 100] fn some_function(origin) { // NOTE: does not make any different. - let _ = frame_system::ensure_signed(origin); + frame_system::ensure_signed(origin)?; } #[weight = (200, DispatchClass::Operational)] fn some_root_operation(origin) { - let _ = frame_system::ensure_root(origin); + frame_system::ensure_root(origin)?; } #[weight = 0] fn some_unsigned_message(origin) { - let _ = frame_system::ensure_none(origin); + frame_system::ensure_none(origin)?; } #[weight = 0] fn allowed_unsigned(origin) { - let _ = frame_system::ensure_root(origin)?; + frame_system::ensure_root(origin)?; } #[weight = 0] fn unallowed_unsigned(origin) { - let _ = frame_system::ensure_root(origin)?; + frame_system::ensure_root(origin)?; + } + + #[weight = 0] + fn inherent_call(origin) { + let _ = frame_system::ensure_none(origin)?; } // module hooks. 
@@ -600,16 +612,29 @@ mod tests { } #[weight = 0] - fn calculate_storage_root(origin) { + fn calculate_storage_root(_origin) { let root = sp_io::storage::root(); sp_io::storage::set("storage_root".as_bytes(), &root); } } } + impl sp_inherents::ProvideInherent for Module { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; + fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + None + } + fn is_inherent(call: &Self::Call) -> bool { + *call == Call::::inherent_call() + } + } + impl sp_runtime::traits::ValidateUnsigned for Module { type Call = Call; + // Inherent call is not validated as unsigned fn validate_unsigned( _source: TransactionSource, call: &Self::Call, @@ -618,6 +643,18 @@ mod tests { Call::allowed_unsigned(..) => Ok(Default::default()), _ => UnknownTransaction::NoUnsignedValidator.into(), } + + } + + // Inherent call is accepted for being dispatched + fn pre_dispatch( + call: &Self::Call, + ) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned(..) => Ok(()), + Call::inherent_call(..) => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } } } } @@ -630,7 +667,7 @@ mod tests { { System: frame_system::{Pallet, Call, Config, Storage, Event}, Balances: pallet_balances::{Pallet, Call, Storage, Config, Event}, - Custom: custom::{Pallet, Call, ValidateUnsigned}, + Custom: custom::{Pallet, Call, ValidateUnsigned, Inherent}, } ); @@ -670,6 +707,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } type Balance = u64; @@ -717,12 +755,7 @@ mod tests { ); type TestXt = sp_runtime::testing::TestXt; type TestBlock = Block; - type TestUncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< - ::AccountId, - ::Call, - (), - SignedExtra, - >; + type TestUncheckedExtrinsic = TestXt; // Will contain `true` when the custom runtime logic was called. 
const CUSTOM_ON_RUNTIME_KEY: &[u8] = &*b":custom:on_runtime"; @@ -856,7 +889,9 @@ mod tests { [69u8; 32].into(), Digest::default(), )); - assert!(Executive::apply_extrinsic(xt).is_err()); + assert_err!(Executive::apply_extrinsic(xt), + TransactionValidityError::Invalid(InvalidTransaction::Future) + ); assert_eq!(>::extrinsic_index(), Some(0)); }); } @@ -1226,4 +1261,57 @@ mod tests { Executive::execute_block(Block::new(header, vec![xt])); }); } + + #[test] + #[should_panic(expected = "Invalid inherent position for extrinsic at index 1")] + fn invalid_inherent_position_fail() { + let xt1 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); + let xt2 = TestXt::new(Call::Custom(custom::Call::inherent_call()), None); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); + } + + #[test] + fn valid_inherents_position_works() { + let xt1 = TestXt::new(Call::Custom(custom::Call::inherent_call()), None); + let xt2 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); + + let header = new_test_ext(1).execute_with(|| { + // Let's build some fake block. 
+ Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + Executive::apply_extrinsic(xt1.clone()).unwrap().unwrap(); + Executive::apply_extrinsic(xt2.clone()).unwrap().unwrap(); + + Executive::finalize_block() + }); + + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block::new(header, vec![xt1, xt2])); + }); + } } diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml index 4df0dc49aaf93..0b40f6ad4d6da 100644 --- a/frame/gilt/Cargo.toml +++ b/frame/gilt/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -30,7 +29,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs index 1abb92ed3dfac..f5c0d3a5aabef 100644 --- a/frame/gilt/src/mock.rs +++ b/frame/gilt/src/mock.rs @@ -70,6 +70,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = SS58Prefix; + type OnSetCode = (); } parameter_types! 
{ diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 547e3966d52a4..a602e8b6daddf 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } @@ -44,7 +43,6 @@ frame-election-provider-support = { version = "3.0.0", path = "../election-provi [features] default = ["std"] std = [ - "serde", "codec/std", "frame-benchmarking/std", "sp-application-crypto/std", diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index eb3dc4f110acb..7cfc1d61baf2f 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -518,21 +518,13 @@ impl Module { None } else { let session_index = - if let Some(session_id) = Self::session_for_set(set_id - 1) { - session_id - } else { - return Err(Error::::InvalidEquivocationProof.into()); - }; + Self::session_for_set(set_id - 1).ok_or_else(|| Error::::InvalidEquivocationProof)?; Some(session_index) }; let set_id_session_index = - if let Some(session_id) = Self::session_for_set(set_id) { - session_id - } else { - return Err(Error::::InvalidEquivocationProof.into()); - }; + Self::session_for_set(set_id).ok_or_else(|| Error::::InvalidEquivocationProof)?; // check that the session id for the membership proof is within the // bounds of the set id reported in the equivocation. 
diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 3f450e18bc783..d59d0d19d0e87 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -97,6 +97,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl frame_system::offchain::SendTransactionTypes for Test diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index 50462d33472a9..92d2c6c751a24 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -24,7 +24,7 @@ use crate::mock::*; use codec::{Decode, Encode}; use fg_primitives::ScheduledChange; use frame_support::{ - assert_err, assert_ok, + assert_err, assert_ok, assert_noop, traits::{Currency, OnFinalize, OneSessionHandler}, weights::{GetDispatchInfo, Pays}, }; @@ -100,21 +100,27 @@ fn cannot_schedule_change_when_one_pending() { initialize_block(1, Default::default()); Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); Grandpa::on_finalize(1); let header = System::finalize(); initialize_block(2, header.hash()); assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); Grandpa::on_finalize(2); let header = System::finalize(); initialize_block(3, header.hash()); assert!(!>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); + assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(3); let _header = System::finalize(); @@ -148,7 +154,10 @@ fn dispatch_forced_change() { ).unwrap(); assert!(>::exists()); 
- assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)), + Error::::ChangePending + ); Grandpa::on_finalize(1); let mut header = System::finalize(); @@ -157,8 +166,14 @@ fn dispatch_forced_change() { initialize_block(i, header.hash()); assert!(>::get().unwrap().forced.is_some()); assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)), + Error::::ChangePending + ); Grandpa::on_finalize(i); header = System::finalize(); @@ -170,7 +185,7 @@ fn dispatch_forced_change() { initialize_block(7, header.hash()); assert!(!>::exists()); assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); + assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None)); Grandpa::on_finalize(7); header = System::finalize(); } @@ -180,7 +195,10 @@ fn dispatch_forced_change() { initialize_block(8, header.hash()); assert!(>::exists()); assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None), + Error::::ChangePending + ); Grandpa::on_finalize(8); header = System::finalize(); } @@ -192,7 +210,10 @@ fn dispatch_forced_change() { assert!(!>::exists()); assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)])); assert_eq!(Grandpa::next_forced(), Some(11)); - 
assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)).is_err()); + assert_noop!( + Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)), + Error::::TooSoon + ); Grandpa::on_finalize(i); header = System::finalize(); } @@ -200,7 +221,7 @@ fn dispatch_forced_change() { { initialize_block(11, header.hash()); assert!(!>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0)).is_ok()); + assert_ok!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0))); assert_eq!(Grandpa::next_forced(), Some(21)); Grandpa::on_finalize(11); header = System::finalize(); @@ -231,7 +252,7 @@ fn schedule_pause_only_when_live() { initialize_block(2, Default::default()); // signaling a pause now should fail - assert!(Grandpa::schedule_pause(1).is_err()); + assert_noop!(Grandpa::schedule_pause(1), Error::::PauseFailed); Grandpa::on_finalize(2); let _ = System::finalize(); @@ -250,7 +271,7 @@ fn schedule_resume_only_when_paused() { initialize_block(1, Default::default()); // the set is currently live, resuming it is an error - assert!(Grandpa::schedule_resume(1).is_err()); + assert_noop!(Grandpa::schedule_resume(1), Error::::ResumeFailed); assert_eq!( Grandpa::state(), diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 08109fda2584c..fce79c56f80a9 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -30,7 +29,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", 
"sp-io/std", diff --git a/frame/identity/src/tests.rs b/frame/identity/src/tests.rs index a996c989a9185..937fa8f130d80 100644 --- a/frame/identity/src/tests.rs +++ b/frame/identity/src/tests.rs @@ -71,6 +71,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 4c5b4a8863bcd..2e816a6bb8564 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -18,7 +18,6 @@ pallet-authorship = { version = "3.0.0", default-features = false, path = "../au codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } -serde = { version = "1.0.101", optional = true } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } @@ -39,7 +38,6 @@ std = [ "codec/std", "sp-core/std", "sp-std/std", - "serde", "sp-io/std", "sp-runtime/std", "sp-staking/std", diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 35028dd89df4e..4f21012abc510 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -140,6 +140,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ diff --git a/frame/im-online/src/weights.rs b/frame/im-online/src/weights.rs index 147ce11682b71..83ec294e8edb8 100644 --- a/frame/im-online/src/weights.rs +++ b/frame/im-online/src/weights.rs @@ -44,7 +44,6 @@ use sp_std::marker::PhantomData; /// Weight functions needed for pallet_im_online. pub trait WeightInfo { fn validate_unsigned_and_then_heartbeat(k: u32, e: u32, ) -> Weight; - } /// Weights for pallet_im_online using the Substrate node and recommended hardware. @@ -56,9 +55,7 @@ impl WeightInfo for SubstrateWeight { .saturating_add((481_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) - } - } // For backwards compatibility and tests @@ -69,7 +66,5 @@ impl WeightInfo for () { .saturating_add((481_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) - } - } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index ce9b2053ff184..4b60ec8bc3ca8 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-keyring = { version = "3.0.0", optional = true, path = "../../primitives/keyring" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -31,7 +30,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "sp-keyring", "codec/std", "sp-core/std", diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index 6ea39e9ccc23e..625a994af38f6 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -24,7 +24,7 @@ use 
frame_system::RawOrigin; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; -use crate::Module as Indices; +use crate::Pallet as Indices; const SEED: u32 = 0; diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index c925d3a0533e0..19697f2d941bb 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -29,86 +29,51 @@ use sp_std::prelude::*; use codec::Codec; use sp_runtime::MultiAddress; use sp_runtime::traits::{ - StaticLookup, Member, LookupError, Zero, Saturating, AtLeast32Bit + StaticLookup, LookupError, Zero, Saturating, AtLeast32Bit }; -use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; -use frame_support::dispatch::DispatchResult; -use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; -use frame_system::{ensure_signed, ensure_root}; +use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved}; pub use weights::WeightInfo; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -/// The module's config trait. -pub trait Config: frame_system::Config { - /// Type used for storing an account's index; implies the maximum number of accounts the system - /// can hold. - type AccountIndex: Parameter + Member + Codec + Default + AtLeast32Bit + Copy; +pub use pallet::*; - /// The currency trait. - type Currency: ReservableCurrency; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// The deposit needed for reserving an index. - type Deposit: Get>; + /// The module's config trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// Type used for storing an account's index; implies the maximum number of accounts the system + /// can hold. 
+ type AccountIndex: Parameter + Member + MaybeSerializeDeserialize + Codec + Default + AtLeast32Bit + Copy; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The currency trait. + type Currency: ReservableCurrency; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} - -decl_storage! { - trait Store for Module as Indices { - /// The lookup from index to account. - pub Accounts build(|config: &GenesisConfig| - config.indices.iter() - .cloned() - .map(|(a, b)| (a, (b, Zero::zero(), false))) - .collect::>() - ): map hasher(blake2_128_concat) T::AccountIndex => Option<(T::AccountId, BalanceOf, bool)>; - } - add_extra_genesis { - config(indices): Vec<(T::AccountIndex, T::AccountId)>; - } -} + /// The deposit needed for reserving an index. + #[pallet::constant] + type Deposit: Get>; -decl_event!( - pub enum Event where - ::AccountId, - ::AccountIndex - { - /// A account index was assigned. \[index, who\] - IndexAssigned(AccountId, AccountIndex), - /// A account index has been freed up (unassigned). \[index\] - IndexFreed(AccountIndex), - /// A account index has been frozen to its current account ID. \[index, who\] - IndexFrozen(AccountIndex, AccountId), - } -); + /// The overarching event type. + type Event: From> + IsType<::Event>; -decl_error! { - pub enum Error for Module { - /// The index was not already assigned. - NotAssigned, - /// The index is assigned to another account. - NotOwner, - /// The index was not available. - InUse, - /// The source and destination accounts are identical. - NotTransfer, - /// The index is permanent and may not be freed/changed. - Permanent, + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = frame_system { - /// The deposit needed for reserving an index. 
- const Deposit: BalanceOf = T::Deposit::get(); + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet {} + #[pallet::call] + impl Pallet { /// Assign an previously unassigned index. /// /// Payment: `Deposit` is reserved from the sender account. @@ -127,8 +92,8 @@ decl_module! { /// ------------------- /// - DB Weight: 1 Read/Write (Accounts) /// # - #[weight = T::WeightInfo::claim()] - fn claim(origin, index: T::AccountIndex) { + #[pallet::weight(T::WeightInfo::claim())] + pub(crate) fn claim(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; Accounts::::try_mutate(index, |maybe_value| { @@ -136,7 +101,8 @@ decl_module! { *maybe_value = Some((who.clone(), T::Deposit::get(), false)); T::Currency::reserve(&who, T::Deposit::get()) })?; - Self::deposit_event(RawEvent::IndexAssigned(who, index)); + Self::deposit_event(Event::IndexAssigned(who, index)); + Ok(()) } /// Assign an index already owned by the sender to another account. The balance reservation @@ -159,8 +125,12 @@ decl_module! { /// - Reads: Indices Accounts, System Account (recipient) /// - Writes: Indices Accounts, System Account (recipient) /// # - #[weight = T::WeightInfo::transfer()] - fn transfer(origin, new: T::AccountId, index: T::AccountIndex) { + #[pallet::weight(T::WeightInfo::transfer())] + pub(crate) fn transfer( + origin: OriginFor, + new: T::AccountId, + index: T::AccountIndex, + ) -> DispatchResult { let who = ensure_signed(origin)?; ensure!(who != new, Error::::NotTransfer); @@ -172,7 +142,8 @@ decl_module! { *maybe_value = Some((new.clone(), amount.saturating_sub(lost), false)); Ok(()) })?; - Self::deposit_event(RawEvent::IndexAssigned(new, index)); + Self::deposit_event(Event::IndexAssigned(new, index)); + Ok(()) } /// Free up an index owned by the sender. @@ -193,8 +164,8 @@ decl_module! 
{ /// ------------------- /// - DB Weight: 1 Read/Write (Accounts) /// # - #[weight = T::WeightInfo::free()] - fn free(origin, index: T::AccountIndex) { + #[pallet::weight(T::WeightInfo::free())] + pub(crate) fn free(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { @@ -204,7 +175,8 @@ decl_module! { T::Currency::unreserve(&who, amount); Ok(()) })?; - Self::deposit_event(RawEvent::IndexFreed(index)); + Self::deposit_event(Event::IndexFreed(index)); + Ok(()) } /// Force an index to an account. This doesn't require a deposit. If the index is already @@ -228,8 +200,13 @@ decl_module! { /// - Reads: Indices Accounts, System Account (original owner) /// - Writes: Indices Accounts, System Account (original owner) /// # - #[weight = T::WeightInfo::force_transfer()] - fn force_transfer(origin, new: T::AccountId, index: T::AccountIndex, freeze: bool) { + #[pallet::weight(T::WeightInfo::force_transfer())] + pub(crate) fn force_transfer( + origin: OriginFor, + new: T::AccountId, + index: T::AccountIndex, + freeze: bool, + ) -> DispatchResult { ensure_root(origin)?; Accounts::::mutate(index, |maybe_value| { @@ -238,7 +215,8 @@ decl_module! { } *maybe_value = Some((new.clone(), Zero::zero(), freeze)); }); - Self::deposit_event(RawEvent::IndexAssigned(new, index)); + Self::deposit_event(Event::IndexAssigned(new, index)); + Ok(()) } /// Freeze an index so it will always point to the sender account. This consumes the deposit. @@ -258,8 +236,8 @@ decl_module! { /// ------------------- /// - DB Weight: 1 Read/Write (Accounts) /// # - #[weight = T::WeightInfo::freeze()] - fn freeze(origin, index: T::AccountIndex) { + #[pallet::weight(T::WeightInfo::freeze())] + pub(crate) fn freeze(origin: OriginFor, index: T::AccountIndex) -> DispatchResult { let who = ensure_signed(origin)?; Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { @@ -270,12 +248,74 @@ decl_module! 
{ *maybe_value = Some((account, Zero::zero(), true)); Ok(()) })?; - Self::deposit_event(RawEvent::IndexFrozen(index, who)); + Self::deposit_event(Event::IndexFrozen(index, who)); + Ok(()) + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", T::AccountIndex = "AccountIndex")] + pub enum Event { + /// A account index was assigned. \[index, who\] + IndexAssigned(T::AccountId, T::AccountIndex), + /// A account index has been freed up (unassigned). \[index\] + IndexFreed(T::AccountIndex), + /// A account index has been frozen to its current account ID. \[index, who\] + IndexFrozen(T::AccountIndex, T::AccountId), + } + + /// Old name generated by `decl_event`. + #[deprecated(note="use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { + /// The index was not already assigned. + NotAssigned, + /// The index is assigned to another account. + NotOwner, + /// The index was not available. + InUse, + /// The source and destination accounts are identical. + NotTransfer, + /// The index is permanent and may not be freed/changed. + Permanent, + } + + /// The lookup from index to account. + #[pallet::storage] + pub type Accounts = StorageMap< + _, Blake2_128Concat, + T::AccountIndex, + (T::AccountId, BalanceOf, bool) + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub indices: Vec<(T::AccountIndex, T::AccountId)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + indices: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + for (a, b) in &self.indices { + >::insert(a, (b, >::zero(), false)) + } } } } -impl Module { +impl Pallet { // PUBLIC IMMUTABLES /// Lookup an T::AccountIndex to get an Id, if there's one there. 
@@ -295,7 +335,7 @@ impl Module { } } -impl StaticLookup for Module { +impl StaticLookup for Pallet { type Source = MultiAddress; type Target = T::AccountId; diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index 01db4b50f5085..efaaa0212467b 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -68,6 +68,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { diff --git a/frame/lottery/src/lib.rs b/frame/lottery/src/lib.rs index 94b7dd459889a..a37238a2d9f85 100644 --- a/frame/lottery/src/lib.rs +++ b/frame/lottery/src/lib.rs @@ -56,7 +56,7 @@ pub mod weights; use sp_std::prelude::*; use sp_runtime::{ - DispatchError, ModuleId, + DispatchError, traits::{AccountIdConversion, Saturating, Zero}, }; use frame_support::{ @@ -66,7 +66,7 @@ use frame_support::{ Currency, ReservableCurrency, Get, EnsureOrigin, ExistenceRequirement::KeepAlive, Randomness, }, }; -use frame_support::weights::Weight; +use frame_support::{weights::Weight, PalletId}; use frame_system::ensure_signed; use codec::{Encode, Decode}; pub use weights::WeightInfo; @@ -76,7 +76,7 @@ type BalanceOf = <::Currency as Currency<; + type PalletId: Get; /// A dispatchable call. type Call: Parameter + Dispatchable + GetDispatchInfo + From>; @@ -209,7 +209,9 @@ decl_error! { decl_module! { pub struct Module for enum Call where origin: T::Origin, system = frame_system { - const ModuleId: ModuleId = T::ModuleId::get(); + type Error = Error; + + const PalletId: PalletId = T::PalletId::get(); const MaxCalls: u32 = T::MaxCalls::get() as u32; fn deposit_event() = default; @@ -359,7 +361,7 @@ impl Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. 
pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() + T::PalletId::get().into_account() } /// Return the pot account and amount of money in the pot. @@ -447,7 +449,7 @@ impl Module { // TODO: deal with randomness freshness // https://github.com/paritytech/substrate/issues/8311 fn generate_random_number(seed: u32) -> u32 { - let (random_seed, _) = T::Randomness::random(&(T::ModuleId::get(), seed).encode()); + let (random_seed, _) = T::Randomness::random(&(T::PalletId::get(), seed).encode()); let random_number = ::decode(&mut random_seed.as_ref()) .expect("secure hashes should always be bigger than u32; qed"); random_number diff --git a/frame/lottery/src/mock.rs b/frame/lottery/src/mock.rs index a776896921a7f..ca372cc37e24e 100644 --- a/frame/lottery/src/mock.rs +++ b/frame/lottery/src/mock.rs @@ -78,6 +78,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -95,13 +96,13 @@ impl pallet_balances::Config for Test { } parameter_types! 
{ - pub const LotteryModuleId: ModuleId = ModuleId(*b"py/lotto"); + pub const LotteryPalletId: PalletId = PalletId(*b"py/lotto"); pub const MaxCalls: usize = 2; pub const MaxGenerateRandom: u32 = 10; } impl Config for Test { - type ModuleId = LotteryModuleId; + type PalletId = LotteryPalletId; type Call = Call; type Currency = Balances; type Randomness = TestRandomness; diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 37e7aa2cb8248..37f9552598cc2 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -13,13 +13,15 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } +log = { version = "0.4.0", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } + +frame-benchmarking = { version = "3.1.0", optional = true, default-features = false, path = "../benchmarking" } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core" } @@ -27,12 +29,19 @@ sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ - "serde", "codec/std", - "sp-runtime/std", + "log/std", "sp-std/std", "sp-io/std", + "sp-runtime/std", "frame-support/std", "frame-system/std", + "frame-benchmarking/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-runtime/runtime-benchmarks", + "frame-support/runtime-benchmarks", + 
"frame-system/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index e26af3ce9b71a..62c9e5eae1a65 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -18,7 +18,7 @@ //! # Membership Module //! //! Allows control of membership of a set of `AccountId`s, useful for managing membership of of a -//! collective. A prime member may be set. +//! collective. A prime member may be set // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -26,11 +26,14 @@ use sp_std::prelude::*; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, - traits::{ChangeMembers, InitializeMembers, EnsureOrigin, Contains}, + traits::{ChangeMembers, InitializeMembers, EnsureOrigin, Contains, SortedMembers, Get}, }; use frame_system::ensure_signed; -pub trait Config: frame_system::Config { +pub mod weights; +pub use weights::WeightInfo; + +pub trait Config: frame_system::Config { /// The overarching event type. type Event: From> + Into<::Event>; @@ -56,6 +59,16 @@ pub trait Config: frame_system::Config { /// The receiver of the signal for when the membership has changed. type MembershipChanged: ChangeMembers; + + /// The maximum number of members that this membership can have. + /// + /// This is used for benchmarking. Re-run the benchmarks if this changes. + /// + /// This is not enforced in the code; the membership size can exceed this limit. + type MaxMembers: Get; + + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } decl_storage! { @@ -113,6 +126,8 @@ decl_module! { for enum Call where origin: T::Origin { + type Error = Error; + fn deposit_event() = default; /// Add a member `who` to the set. @@ -125,6 +140,8 @@ decl_module! 
{ let mut members = >::get(); let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; members.insert(location, who.clone()); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); @@ -142,6 +159,8 @@ decl_module! { let mut members = >::get(); let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; members.remove(location); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); @@ -166,6 +185,8 @@ decl_module! { let _ = members.binary_search(&add).err().ok_or(Error::::AlreadyMember)?; members[location] = add.clone(); members.sort(); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted( @@ -191,10 +212,10 @@ decl_module! { >::mutate(|m| { T::MembershipChanged::set_members_sorted(&members[..], m); Self::rejig_prime(&members); + Self::maybe_warn_max_members(&members); *m = members; }); - Self::deposit_event(RawEvent::MembersReset); } @@ -213,6 +234,8 @@ decl_module! 
{ let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; members[location] = new.clone(); members.sort(); + + Self::maybe_warn_max_members(&members); >::put(&members); T::MembershipChanged::change_members_sorted( @@ -262,9 +285,26 @@ impl, I: Instance> Module { } } } + + fn maybe_warn_max_members(members: &[T::AccountId]) { + if members.len() as u32 > T::MaxMembers::get() { + log::error!( + target: "runtime::membership", + "maximum number of members used for weight is exceeded, weights can be underestimated [{} > {}].", + members.len(), + T::MaxMembers::get(), + ) + } + } } impl, I: Instance> Contains for Module { + fn contains(t: &T::AccountId) -> bool { + Self::members().binary_search(t).is_ok() + } +} + +impl, I: Instance> SortedMembers for Module { fn sorted_members() -> Vec { Self::members() } @@ -274,6 +314,149 @@ impl, I: Instance> Contains for Module { } } +#[cfg(feature = "runtime-benchmarks")] +mod benchmark { + use super::{*, Module as Membership}; + use frame_system::RawOrigin; + use frame_support::{traits::EnsureOrigin, assert_ok}; + use frame_benchmarking::{benchmarks_instance, whitelist, account, impl_benchmark_test_suite}; + + const SEED: u32 = 0; + + fn set_members, I: Instance>(members: Vec, prime: Option) { + let reset_origin = T::ResetOrigin::successful_origin(); + let prime_origin = T::PrimeOrigin::successful_origin(); + + assert_ok!(>::reset_members(reset_origin, members.clone())); + if let Some(prime) = prime.map(|i| members[i].clone()) { + assert_ok!(>::set_prime(prime_origin, prime)); + } else { + assert_ok!(>::clear_prime(prime_origin)); + } + } + + benchmarks_instance! { + add_member { + let m in 1 .. 
T::MaxMembers::get(); + + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), None); + let new_member = account::("add", m, SEED); + }: { + assert_ok!(>::add_member(T::AddOrigin::successful_origin(), new_member.clone())); + } + verify { + assert!(>::get().contains(&new_member)); + #[cfg(test)] crate::tests::clean(); + } + + // the case of no prime or the prime being removed is surely cheaper than the case of + // reporting a new prime via `MembershipChanged`. + remove_member { + let m in 2 .. T::MaxMembers::get(); + + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), Some(members.len() - 1)); + + let to_remove = members.first().cloned().unwrap(); + }: { + assert_ok!(>::remove_member(T::RemoveOrigin::successful_origin(), to_remove.clone())); + } verify { + assert!(!>::get().contains(&to_remove)); + // prime is rejigged + assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + // we remove a non-prime to make sure it needs to be set again. + swap_member { + let m in 2 .. T::MaxMembers::get(); + + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), Some(members.len() - 1)); + let add = account::("member", m, SEED); + let remove = members.first().cloned().unwrap(); + }: { + assert_ok!(>::swap_member( + T::SwapOrigin::successful_origin(), + remove.clone(), + add.clone(), + )); + } verify { + assert!(!>::get().contains(&remove)); + assert!(>::get().contains(&add)); + // prime is rejigged + assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + // er keep the prime common between incoming and outgoing to make sure it is rejigged. + reset_member { + let m in 1 .. 
T::MaxMembers::get(); + + let members = (1..m+1).map(|i| account("member", i, SEED)).collect::>(); + set_members::(members.clone(), Some(members.len() - 1)); + let mut new_members = (m..2*m).map(|i| account("member", i, SEED)).collect::>(); + }: { + assert_ok!(>::reset_members(T::ResetOrigin::successful_origin(), new_members.clone())); + } verify { + new_members.sort(); + assert_eq!(>::get(), new_members); + // prime is rejigged + assert!(>::get().is_some() && T::MembershipChanged::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + change_key { + let m in 1 .. T::MaxMembers::get(); + + // worse case would be to change the prime + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let prime = members.last().cloned().unwrap(); + set_members::(members.clone(), Some(members.len() - 1)); + + let add = account::("member", m, SEED); + whitelist!(prime); + }: { + assert_ok!(>::change_key(RawOrigin::Signed(prime.clone()).into(), add.clone())); + } verify { + assert!(!>::get().contains(&prime)); + assert!(>::get().contains(&add)); + // prime is rejigged + assert_eq!(>::get().unwrap(), add); + #[cfg(test)] crate::tests::clean(); + } + + set_prime { + let m in 1 .. T::MaxMembers::get(); + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let prime = members.last().cloned().unwrap(); + set_members::(members, None); + }: { + assert_ok!(>::set_prime(T::PrimeOrigin::successful_origin(), prime)); + } verify { + assert!(>::get().is_some()); + assert!(::get_prime().is_some()); + #[cfg(test)] crate::tests::clean(); + } + + clear_prime { + let m in 1 .. 
T::MaxMembers::get(); + let members = (0..m).map(|i| account("member", i, SEED)).collect::>(); + let prime = members.last().cloned().unwrap(); + set_members::(members, None); + }: { + assert_ok!(>::clear_prime(T::PrimeOrigin::successful_origin())); + } verify { + assert!(>::get().is_none()); + assert!(::get_prime().is_none()); + #[cfg(test)] crate::tests::clean(); + } + } + + impl_benchmark_test_suite!(Membership, crate::tests::new_bench_ext(), crate::tests::Test,); +} + #[cfg(test)] mod tests { use super::*; @@ -300,11 +483,13 @@ mod tests { parameter_types! { pub const BlockHashCount: u64 = 250; + pub const MaxMembers: u32 = 10; pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(1024); pub static Members: Vec = vec![]; pub static Prime: Option = None; } + impl frame_system::Config for Test { type BaseCallFilter = (); type BlockWeights = (); @@ -328,6 +513,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } ord_parameter_types! 
{ pub const One: u64 = 1; @@ -340,7 +526,7 @@ mod tests { pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + let mut old_plus_incoming = Members::get(); old_plus_incoming.extend_from_slice(incoming); old_plus_incoming.sort(); let mut new_plus_outgoing = new.to_vec(); @@ -348,13 +534,17 @@ mod tests { new_plus_outgoing.sort(); assert_eq!(old_plus_incoming, new_plus_outgoing); - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); - PRIME.with(|p| *p.borrow_mut() = None); + Members::set(new.to_vec()); + Prime::set(None); } fn set_prime(who: Option) { - PRIME.with(|p| *p.borrow_mut() = who); + Prime::set(who); + } + fn get_prime() -> Option { + Prime::get() } } + impl InitializeMembers for TestChangeMembers { fn initialize_members(members: &[u64]) { MEMBERS.with(|m| *m.borrow_mut() = members.to_vec()); @@ -370,9 +560,11 @@ mod tests { type PrimeOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; type MembershipChanged = TestChangeMembers; + type MaxMembers = MaxMembers; + type WeightInfo = (); } - fn new_test_ext() -> sp_io::TestExternalities { + pub(crate) fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. 
pallet_membership::GenesisConfig::{ @@ -382,6 +574,17 @@ mod tests { t.into() } + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn new_bench_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() + } + + #[cfg(feature = "runtime-benchmarks")] + pub(crate) fn clean() { + Members::set(vec![]); + Prime::set(None); + } + #[test] fn query_membership_works() { new_test_ext().execute_with(|| { diff --git a/frame/membership/src/weights.rs b/frame/membership/src/weights.rs new file mode 100644 index 0000000000000..fbdb44caec84c --- /dev/null +++ b/frame/membership/src/weights.rs @@ -0,0 +1,159 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_membership +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-17, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_membership +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/membership/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_membership. +pub trait WeightInfo { + fn add_member(m: u32, ) -> Weight; + fn remove_member(m: u32, ) -> Weight; + fn swap_member(m: u32, ) -> Weight; + fn reset_member(m: u32, ) -> Weight; + fn change_key(m: u32, ) -> Weight; + fn set_prime(m: u32, ) -> Weight; + fn clear_prime(m: u32, ) -> Weight; +} + +/// Weights for pallet_membership using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn add_member(m: u32, ) -> Weight { + (25_448_000 as Weight) + // Standard Error: 3_000 + .saturating_add((257_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn remove_member(m: u32, ) -> Weight { + (31_317_000 as Weight) + // Standard Error: 0 + .saturating_add((215_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn swap_member(m: u32, ) -> Weight { + (31_208_000 as Weight) + // Standard Error: 0 + .saturating_add((229_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn reset_member(m: u32, ) -> Weight { + (31_673_000 as Weight) + // Standard Error: 1_000 + .saturating_add((455_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + } + fn change_key(m: u32, ) -> Weight { + (33_499_000 as Weight) + // Standard Error: 0 + .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn set_prime(m: u32, ) -> Weight { + (8_865_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn clear_prime(m: u32, ) -> Weight { + (3_397_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } +} + +// For backwards compatibility and tests +impl 
WeightInfo for () { + fn add_member(m: u32, ) -> Weight { + (25_448_000 as Weight) + // Standard Error: 3_000 + .saturating_add((257_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn remove_member(m: u32, ) -> Weight { + (31_317_000 as Weight) + // Standard Error: 0 + .saturating_add((215_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn swap_member(m: u32, ) -> Weight { + (31_208_000 as Weight) + // Standard Error: 0 + .saturating_add((229_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn reset_member(m: u32, ) -> Weight { + (31_673_000 as Weight) + // Standard Error: 1_000 + .saturating_add((455_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + } + fn change_key(m: u32, ) -> Weight { + (33_499_000 as Weight) + // Standard Error: 0 + .saturating_add((226_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn set_prime(m: u32, ) -> Weight { + (8_865_000 as Weight) + // Standard Error: 0 + .saturating_add((124_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn clear_prime(m: u32, ) -> Weight { + (3_397_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(m as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } +} diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml 
index 8861ba5c0c8b9..6ca451c4ab489 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -18,7 +18,6 @@ frame-support = { version = "3.0.0", default-features = false, path = "../suppor frame-system = { version = "3.0.0", default-features = false, path = "../system" } mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.1" } pallet-mmr-primitives = { version = "3.0.0", default-features = false, path = "./primitives" } -serde = { version = "1.0.101", optional = true } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -37,7 +36,6 @@ std = [ "frame-system/std", "mmr-lib/std", "pallet-mmr-primitives/std", - "serde", "sp-core/std", "sp-io/std", "sp-runtime/std", diff --git a/frame/merkle-mountain-range/src/mock.rs b/frame/merkle-mountain-range/src/mock.rs index 072724a58afe5..3c8a5d284566f 100644 --- a/frame/merkle-mountain-range/src/mock.rs +++ b/frame/merkle-mountain-range/src/mock.rs @@ -71,6 +71,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl Config for Test { diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index e48f80567f67a..7657f64c819fb 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -31,7 +30,6 @@ pallet-balances = { 
version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-runtime/std", "frame-support/std", diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index a3a3edc34f1a9..118cfebdbdce6 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -71,6 +71,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 6c8b609b401ca..12db6f905f2ec 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } @@ -28,7 +27,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-io/std", diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 67e62a09da64a..1afe55756777a 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -292,6 +292,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index 786eb84d1e523..6e657758e8e99 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -12,7 +12,6 @@ description = "FRAME pallet for node authorization" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -25,7 +24,6 @@ log = { version = "0.4.14", default-features = false } [features] default = ["std"] std = [ - "serde", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/node-authorization/src/mock.rs b/frame/node-authorization/src/mock.rs index 5118f07c7694e..3f4f894cdf7e3 100644 --- a/frame/node-authorization/src/mock.rs +++ b/frame/node-authorization/src/mock.rs @@ -70,6 +70,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } ord_parameter_types! { diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 223d6d4d477a6..a0a09e0fbb897 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -65,6 +65,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: Balance = 10; diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index ab45bb0837b56..52dd55207af07 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -124,6 +124,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 2934b9953b316..d8f7afe433cb3 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -32,7 +31,6 @@ pallet-utility = { version = "3.0.0", path = "../utility" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-runtime/std", "frame-support/std", diff --git a/frame/proxy/src/tests.rs b/frame/proxy/src/tests.rs index 797a5ee3d4694..6f3b1f35e2ada 100644 --- a/frame/proxy/src/tests.rs +++ b/frame/proxy/src/tests.rs @@ -73,6 +73,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 5ef76a33c21f4..724605c6238b6 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -193,6 +193,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 1f8003bd4d056..acfd2f613f839 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -29,7 +28,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-io/std", diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index cb991e64945a1..ceb2f5a688742 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -621,7 +621,8 @@ decl_module! { let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; // Move the reserved funds from the rescuer to the rescued account. // Acts like a slashing mechanism for those who try to maliciously recover accounts. 
- let _ = T::Currency::repatriate_reserved(&rescuer, &who, active_recovery.deposit, BalanceStatus::Free); + let res = T::Currency::repatriate_reserved(&rescuer, &who, active_recovery.deposit, BalanceStatus::Free); + debug_assert!(res.is_ok()); Self::deposit_event(RawEvent::RecoveryClosed(who, rescuer)); } diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 301dd8dba8ddd..72dbc29fd7160 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -70,6 +70,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index 4d82133b6af9d..8fb5d148662b3 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -10,7 +10,6 @@ description = "FRAME example pallet" readme = "README.md" [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -28,7 +27,6 @@ substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-runtime/std", "frame-benchmarking/std", diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 9848c9853d0bf..5332aedf7f136 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -76,8 +76,8 @@ pub trait Config: system::Config { type Event: From> + Into<::Event>; /// The aggregated origin which the dispatch will take. - type Origin: OriginTrait + From + IsType<::Origin>; + type Origin: OriginTrait + + From + IsType<::Origin>; /// The caller origin, overarching type of all pallets origins. 
type PalletsOrigin: From> + Codec + Clone + Eq; @@ -835,6 +835,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl logger::Config for Test { type Event = Event; diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index 97e3a954d7e25..0b2f4a8198833 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -29,7 +28,6 @@ sp-core = { version = "3.0.0", path = "../../primitives/core" } default = ["std"] std = [ "codec/std", - "serde", "sp-io/std", "sp-runtime/std", "sp-std/std", diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 76f9dd848d6c0..1da665f43eaef 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -79,6 +79,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_balances::Config for Test { diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 52b8ebbdf4780..44e1f2f67858b 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } 
sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -35,7 +34,6 @@ lazy_static = "1.4.0" default = ["std", "historical"] historical = ["sp-trie"] std = [ - "serde", "codec/std", "sp-std/std", "sp-io/std", diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 53afeb620c260..cf2fa8a07cfe0 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -67,6 +67,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: Balance = 10; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index e7b16808f7239..cbe70598a91b3 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -750,11 +750,11 @@ impl Module { let who = T::ValidatorIdOf::convert(account.clone()) .ok_or(Error::::NoAssociatedValidatorId)?; - frame_system::Pallet::::inc_consumers(&account).map_err(|_| Error::::NoAccount)?; + ensure!(frame_system::Pallet::::can_inc_consumer(&account), Error::::NoAccount); let old_keys = Self::inner_set_keys(&who, keys)?; - if old_keys.is_some() { - let _ = frame_system::Pallet::::dec_consumers(&account); - // ^^^ Defensive only; Consumers were incremented just before, so should never fail. 
+ if old_keys.is_none() { + let assertion = frame_system::Pallet::::inc_consumers(&account).is_ok(); + debug_assert!(assertion, "can_inc_consumer() returned true; no change since; qed"); } Ok(()) @@ -777,6 +777,10 @@ impl Module { Self::key_owner(*id, key).map_or(true, |owner| &owner == who), Error::::DuplicatedKey, ); + } + + for id in T::Keys::key_ids() { + let key = keys.get_raw(*id); if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { if key == old { @@ -819,7 +823,8 @@ impl Module { >::insert(v, keys); } - fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { + /// Query the owner of a session key by returning the owner's validator ID. + pub fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { >::get((id, key_data)) } diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index b64359fccee32..3459ab73d6afe 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -250,6 +250,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_timestamp::Config for Test { diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index a528b3293dacb..f48388b5a002c 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -18,8 +18,9 @@ // Tests for the Session Pallet use super::*; +use mock::Test; use codec::Decode; -use frame_support::{traits::OnInitialize, assert_ok}; +use frame_support::{traits::OnInitialize, assert_ok, assert_noop}; use sp_core::crypto::key_types::DUMMY; use sp_runtime::testing::UintAuthorityId; use mock::{ @@ -181,11 +182,14 @@ fn duplicates_are_not_allowed() { new_test_ext().execute_with(|| { System::set_block_number(1); Session::on_initialize(1); - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_err()); - assert!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![]).is_ok()); + assert_noop!( + 
Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]), + Error::::DuplicatedKey, + ); + assert_ok!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![])); // is fine now that 1 has migrated off. - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_ok()); + assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![])); }); } @@ -357,7 +361,6 @@ fn return_true_if_more_than_third_is_disabled() { #[test] fn upgrade_keys() { use frame_support::storage; - use mock::Test; use sp_core::crypto::key_types::DUMMY; // This test assumes certain mocks. diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index a3c6dcadab86d..f9c2990061985 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -31,7 +30,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } default = ["std"] std = [ "codec/std", - "serde", "sp-runtime/std", "rand_chacha/std", "sp-std/std", diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 64caf328002af..3b661386da23e 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -254,13 +254,13 @@ mod tests; use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; use sp_std::prelude::*; use codec::{Encode, Decode}; -use sp_runtime::{Percent, ModuleId, RuntimeDebug, +use sp_runtime::{Percent, RuntimeDebug, traits::{ StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, Hash, TrailingZeroInput, CheckedSub } }; -use frame_support::{decl_error, 
decl_module, decl_storage, decl_event, ensure, dispatch::DispatchResult}; +use frame_support::{decl_error, decl_module, decl_storage, decl_event, ensure, dispatch::DispatchResult, PalletId}; use frame_support::weights::Weight; use frame_support::traits::{ Currency, ReservableCurrency, Randomness, Get, ChangeMembers, BalanceStatus, @@ -277,7 +277,7 @@ pub trait Config: system::Config { type Event: From> + Into<::Event>; /// The societies's module id - type ModuleId: Get; + type PalletId: Get; /// The currency type used for bidding. type Currency: ReservableCurrency; @@ -498,7 +498,7 @@ decl_module! { const ChallengePeriod: T::BlockNumber = T::ChallengePeriod::get(); /// The societies's module id - const ModuleId: ModuleId = T::ModuleId::get(); + const PalletId: PalletId = T::PalletId::get(); /// Maximum candidate intake per round. const MaxCandidateIntake: u32 = T::MaxCandidateIntake::get(); @@ -997,7 +997,8 @@ decl_module! { match kind { BidKind::Deposit(deposit) => { // Slash deposit and move it to the society account - let _ = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); + let res = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); + debug_assert!(res.is_ok()); } BidKind::Vouch(voucher, _) => { // Ban the voucher from vouching again @@ -1600,7 +1601,7 @@ impl, I: Instance> Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() + T::PalletId::get().into_account() } /// The account ID of the payouts pot. This is where payouts are made from. @@ -1608,7 +1609,7 @@ impl, I: Instance> Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. 
pub fn payouts() -> T::AccountId { - T::ModuleId::get().into_sub_account(b"payouts") + T::PalletId::get().into_sub_account(b"payouts") } /// Return the duration of the lock, in blocks, with the given number of members. diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index ff80b50b6d358..aa46d40a14ae9 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -58,7 +58,7 @@ parameter_types! { pub const BlockHashCount: u64 = 250; pub const ExistentialDeposit: u64 = 1; pub const MaxCandidateIntake: u32 = 10; - pub const SocietyModuleId: ModuleId = ModuleId(*b"py/socie"); + pub const SocietyPalletId: PalletId = PalletId(*b"py/socie"); pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(1024); } @@ -91,6 +91,7 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_balances::Config for Test { @@ -118,7 +119,7 @@ impl Config for Test { type SuspensionJudgementOrigin = EnsureSignedBy; type ChallengePeriod = ChallengePeriod; type MaxCandidateIntake = MaxCandidateIntake; - type ModuleId = SocietyModuleId; + type PalletId = SocietyPalletId; } pub struct EnvBuilder { diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 8fe7975cef068..11d810a26e175 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -64,6 +64,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: Balance = 10; diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 31735f75ebc14..4252eae50d9bf 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -290,7 +290,7 @@ use codec::{HasCompact, Encode, Decode}; use frame_support::{ decl_module, decl_event, decl_storage, ensure, decl_error, weights::{ - Weight, + Weight, WithPostDispatchInfo, constants::{WEIGHT_PER_MICROS, WEIGHT_PER_NANOS}, }, storage::IterableStorageMap, @@ -313,8 +313,6 @@ use sp_staking::{ SessionIndex, offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence, OffenceError}, }; -#[cfg(feature = "std")] -use sp_runtime::{Serialize, Deserialize}; use frame_system::{ self as system, ensure_signed, ensure_root, offchain::SendTransactionTypes, @@ -380,7 +378,7 @@ pub struct EraRewardPoints { /// Indicates the initial status of the staker. #[derive(RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum StakerStatus { /// Chilling. Idle, @@ -793,7 +791,7 @@ pub trait Config: frame_system::Config + SendTransactionTypes> { /// Mode of era-forcing. #[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub enum Forcing { /// Not forcing anything - just let whatever happen. NotForcing, @@ -1036,30 +1034,15 @@ pub mod migrations { pub mod v6 { use super::*; - use frame_support::{traits::Get, weights::Weight, pallet_prelude::*}; - - macro_rules! generate_storage_types { - ($name:ident => Value<$value:ty>) => { - paste::paste! 
{ - struct [<$name Instance>]; - impl frame_support::traits::StorageInstance for [<$name Instance>] { - fn pallet_prefix() -> &'static str { - "Staking" - } - const STORAGE_PREFIX: &'static str = stringify!($name); - } - type $name = StorageValue<[<$name Instance>], $value, ValueQuery>; - } - } - } + use frame_support::{traits::Get, weights::Weight, generate_storage_alias}; // NOTE: value type doesn't matter, we just set it to () here. - generate_storage_types!(SnapshotValidators => Value<()>); - generate_storage_types!(SnapshotNominators => Value<()>); - generate_storage_types!(QueuedElected => Value<()>); - generate_storage_types!(QueuedScore => Value<()>); - generate_storage_types!(EraElectionStatus => Value<()>); - generate_storage_types!(IsCurrentSessionFinal => Value<()>); + generate_storage_alias!(Staking, SnapshotValidators => Value<()>); + generate_storage_alias!(Staking, SnapshotNominators => Value<()>); + generate_storage_alias!(Staking, QueuedElected => Value<()>); + generate_storage_alias!(Staking, QueuedScore => Value<()>); + generate_storage_alias!(Staking, EraElectionStatus => Value<()>); + generate_storage_alias!(Staking, IsCurrentSessionFinal => Value<()>); /// check to execute prior to migration. pub fn pre_migrate() -> Result<(), &'static str> { @@ -1205,6 +1188,11 @@ decl_module! { } } + fn on_initialize(_now: T::BlockNumber) -> Weight { + // just return the weight of the on_finalize. + T::DbWeight::get().reads(1) + } + fn on_finalize() { // Set the start of the first era. if let Some(mut active_era) = Self::active_era() { @@ -1803,7 +1791,7 @@ decl_module! { /// Paying even a dead controller is cheaper weight-wise. We don't do any refunds here. 
/// # #[weight = T::WeightInfo::payout_stakers_alive_staked(T::MaxNominatorRewardedPerValidator::get())] - fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { + fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { ensure_signed(origin)?; Self::do_payout_stakers(validator_stash, era) } @@ -1967,24 +1955,35 @@ impl Module { }) } - fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { + fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResultWithPostInfo { // Validate input data - let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; - ensure!(era <= current_era, Error::::InvalidEraToReward); + let current_era = CurrentEra::get().ok_or( + Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?; let history_depth = Self::history_depth(); - ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); + ensure!( + era <= current_era && era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + ); // Note: if era has no reward to be claimed, era may be future. better not to update // `ledger.claimed_rewards` in this case. 
let era_payout = >::get(&era) - .ok_or_else(|| Error::::InvalidEraToReward)?; - - let controller = Self::bonded(&validator_stash).ok_or(Error::::NotStash)?; + .ok_or_else(|| + Error::::InvalidEraToReward + .with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?; + + let controller = Self::bonded(&validator_stash).ok_or( + Error::::NotStash.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?; let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err(Error::::AlreadyClaimed)?, + Ok(_) => Err( + Error::::AlreadyClaimed.with_weight(T::WeightInfo::payout_stakers_alive_staked(0)) + )?, Err(pos) => ledger.claimed_rewards.insert(pos, era), } @@ -2008,7 +2007,9 @@ impl Module { .unwrap_or_else(|| Zero::zero()); // Nothing to do if they have no reward points. - if validator_reward_points.is_zero() { return Ok(())} + if validator_reward_points.is_zero() { + return Ok(Some(T::WeightInfo::payout_stakers_alive_staked(0)).into()) + } // This is the fraction of the total reward that the validator and the // nominators will get. @@ -2041,6 +2042,10 @@ impl Module { Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); } + // Track the number of payout ops to nominators. Note: `WeightInfo::payout_stakers_alive_staked` + // always assumes at least a validator is paid out, so we do not need to count their payout op. + let mut nominator_payout_count: u32 = 0; + // Lets now calculate how this is split to the nominators. // Reward only the clipped exposures. Note this is not necessarily sorted. 
for nominator in exposure.others.iter() { @@ -2052,11 +2057,14 @@ impl Module { let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; // We can now make nominator payout: if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { + // Note: this logic does not count payouts for `RewardDestination::None`. + nominator_payout_count += 1; Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); } } - Ok(()) + debug_assert!(nominator_payout_count <= T::MaxNominatorRewardedPerValidator::get()); + Ok(Some(T::WeightInfo::payout_stakers_alive_staked(nominator_payout_count)).into()) } /// Update the ledger for a controller. @@ -2728,11 +2736,8 @@ impl Convert for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { - if let Some(active_era) = >::active_era() { - Some(>::eras_stakers(active_era.index, &validator)) - } else { - None - } + >::active_era() + .map(|active_era| >::eras_stakers(active_era.index, &validator)) } } diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 03f5acfad7286..188eda801095e 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -150,6 +150,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_balances::Config for Test { type MaxLocks = MaxLocks; @@ -269,6 +270,8 @@ where } pub type Extrinsic = TestXt; +pub(crate) type StakingCall = crate::Call; +pub(crate) type TestRuntimeCall = ::Call; pub struct ExtBuilder { validator_pool: bool, diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 2b2ac61356c47..fd0a63b288ab2 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -47,7 +47,7 @@ //! has multiple misbehaviors. However, accounting for such cases is necessary //! to deter a class of "rage-quit" attacks. //! -//! Based on research at +//! 
Based on research at use super::{ EraIndex, Config, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index df3456bf29926..05eb6fdc5e028 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -20,12 +20,14 @@ use super::*; use mock::*; use sp_runtime::{ - assert_eq_error_rate, traits::BadOrigin, + assert_eq_error_rate, + traits::{BadOrigin, Dispatchable}, }; use sp_staking::offence::OffenceDetails; use frame_support::{ assert_ok, assert_noop, StorageMap, traits::{Currency, ReservableCurrency, OnInitialize}, + weights::{extract_actual_weight, GetDispatchInfo}, }; use pallet_balances::Error as BalancesError; use substrate_test_utils::assert_eq_uvec; @@ -435,7 +437,8 @@ fn no_candidate_emergency_condition() { ::MinimumValidatorCount::put(10); // try to chill - let _ = Staking::chill(Origin::signed(10)); + let res = Staking::chill(Origin::signed(10)); + assert_ok!(res); // trigger era mock::start_active_era(1); @@ -2976,6 +2979,9 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // * an invalid era to claim doesn't update last_reward // * double claim of one era fails ExtBuilder::default().nominate(true).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + let init_balance_10 = Balances::total_balance(&10); let init_balance_100 = Balances::total_balance(&100); @@ -3021,19 +3027,19 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, 0), // Fail: Era out of history - Error::::InvalidEraToReward + Error::::InvalidEraToReward.with_weight(err_weight) ); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, 
2), // Fail: Double claim - Error::::AlreadyClaimed + Error::::AlreadyClaimed.with_weight(err_weight) ); assert_noop!( Staking::payout_stakers(Origin::signed(1337), 11, active_era), // Fail: Era not finished yet - Error::::InvalidEraToReward + Error::::InvalidEraToReward.with_weight(err_weight) ); // Era 0 can't be rewarded anymore and current era can't be rewarded yet @@ -3287,6 +3293,9 @@ fn test_payout_stakers() { fn payout_stakers_handles_basic_errors() { // Here we will test payouts handle all errors. ExtBuilder::default().has_stakers(false).build_and_execute(|| { + // Consumed weight for all payout_stakers dispatches that fail + let err_weight = weights::SubstrateWeight::::payout_stakers_alive_staked(0); + // Same setup as the test above let balance = 1000; bond_validator(11, 10, balance); // Default(64) @@ -3305,9 +3314,15 @@ fn payout_stakers_handles_basic_errors() { mock::start_active_era(2); // Wrong Era, too big - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 2), + Error::::InvalidEraToReward.with_weight(err_weight) + ); // Wrong Staker - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 10, 1), Error::::NotStash); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 10, 1), + Error::::NotStash.with_weight(err_weight) + ); for i in 3..100 { Staking::reward_by_ids(vec![(11, 1)]); @@ -3317,14 +3332,134 @@ fn payout_stakers_handles_basic_errors() { } // We are at era 99, with history depth of 84 // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. 
- assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 14), Error::::InvalidEraToReward); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 99), Error::::InvalidEraToReward); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 14), + Error::::InvalidEraToReward.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 99), + Error::::InvalidEraToReward.with_weight(err_weight) + ); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); // Can't claim again - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 15), Error::::AlreadyClaimed); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 98), Error::::AlreadyClaimed); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 15), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 98), + Error::::AlreadyClaimed.with_weight(err_weight) + ); + }); +} + +#[test] +fn payout_stakers_handles_weight_refund() { + // Note: this test relies on the assumption that `payout_stakers_alive_staked` is solely used by + // `payout_stakers` to calculate the weight of each payout op. + ExtBuilder::default().has_stakers(false).build_and_execute(|| { + let max_nom_rewarded = ::MaxNominatorRewardedPerValidator::get(); + // Make sure the configured value is meaningful for our use. + assert!(max_nom_rewarded >= 4); + let half_max_nom_rewarded = max_nom_rewarded / 2; + // Sanity check our max and half max nominator quantities. 
+ assert!(half_max_nom_rewarded > 0); + assert!(max_nom_rewarded > half_max_nom_rewarded); + + let max_nom_rewarded_weight + = ::WeightInfo::payout_stakers_alive_staked(max_nom_rewarded); + let half_max_nom_rewarded_weight + = ::WeightInfo::payout_stakers_alive_staked(half_max_nom_rewarded); + let zero_nom_payouts_weight = ::WeightInfo::payout_stakers_alive_staked(0); + assert!(zero_nom_payouts_weight > 0); + assert!(half_max_nom_rewarded_weight > zero_nom_payouts_weight); + assert!(max_nom_rewarded_weight > half_max_nom_rewarded_weight); + + let balance = 1000; + bond_validator(11, 10, balance); + + /* Era 1 */ + start_active_era(1); + + // Reward just the validator. + Staking::reward_by_ids(vec![(11, 1)]); + + // Add some `half_max_nom_rewarded` nominators who will start backing the validator in the + // next era. + for i in 0..half_max_nom_rewarded { + bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); + } + + /* Era 2 */ + start_active_era(2); + + // Collect payouts when there are no nominators + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 1)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!( + extract_actual_weight(&result, &info), + zero_nom_payouts_weight + ); + + // The validator is not rewarded in this era; so there will be zero payouts to claim for this era. + + /* Era 3 */ + start_active_era(3); + + // Collect payouts for an era where the validator did not receive any points. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 2)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); + + // Reward the validator and its nominators. 
+ Staking::reward_by_ids(vec![(11, 1)]); + + /* Era 4 */ + start_active_era(4); + + // Collect payouts when the validator has `half_max_nom_rewarded` nominators. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 3)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), half_max_nom_rewarded_weight); + + // Add enough nominators so that we are at the limit. They will be active nominators + // in the next era. + for i in half_max_nom_rewarded..max_nom_rewarded { + bond_nominator((1000 + i).into(), (100 + i).into(), balance + i as Balance, vec![11]); + } + + /* Era 5 */ + start_active_era(5); + // We now have `max_nom_rewarded` nominators actively nominating our validator. + + // Reward the validator so we can collect for everyone in the next era. + Staking::reward_by_ids(vec![(11, 1)]); + + /* Era 6 */ + start_active_era(6); + + // Collect payouts when the validator had `half_max_nom_rewarded` nominators. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 5)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert_ok!(result); + assert_eq!(extract_actual_weight(&result, &info), max_nom_rewarded_weight); + + // Try and collect payouts for an era that has already been collected. + let call = TestRuntimeCall::Staking(StakingCall::payout_stakers(11, 5)); + let info = call.get_dispatch_info(); + let result = call.dispatch(Origin::signed(20)); + assert!(result.is_err()); + // When there is an error the consumed weight == weight when there are 0 nominator payouts. 
+ assert_eq!(extract_actual_weight(&result, &info), zero_nom_payouts_weight); }); } @@ -3693,6 +3828,14 @@ fn do_not_die_when_active_is_ed() { }) } +#[test] +fn on_finalize_weight_is_nonzero() { + ExtBuilder::default().build_and_execute(|| { + let on_finalize_weight = ::DbWeight::get().reads(1); + assert!(Staking::on_initialize(1) >= on_finalize_weight); + }) +} + mod election_data_provider { use super::*; use frame_election_provider_support::ElectionDataProvider; diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index 520bef8c539b2..d3274cad8050e 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_staking //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-03-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-03-25, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -76,155 +76,155 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (82_121_000 as Weight) + (79_895_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (61_899_000 as Weight) + (60_561_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (56_392_000 as Weight) + (54_996_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (57_382_000 as Weight) + (56_056_000 as Weight) // Standard Error: 0 - .saturating_add((70_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((67_000 as 
Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (92_185_000 as Weight) + (90_267_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_844_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (16_892_000 as Weight) + (16_345_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_411_000 as Weight) + (27_080_000 as Weight) // Standard Error: 14_000 - .saturating_add((19_272_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((18_739_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (30_188_000 as Weight) - // Standard Error: 24_000 - .saturating_add((5_666_000 as Weight).saturating_mul(n as Weight)) + (29_101_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_670_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (15_870_000 as Weight) + (15_771_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_853_000 as Weight) + 
(13_329_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (30_291_000 as Weight) + (29_807_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_397_000 as Weight) + (2_323_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_627_000 as Weight) + (2_528_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_679_000 as Weight) + (2_529_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_643_000 as Weight) + (2_527_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_871_000 as Weight) + (2_661_000 as Weight) // Standard Error: 0 .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (65_876_000 as Weight) + (64_650_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_832_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_755_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_896_640_000 as Weight) - // Standard Error: 391_000 - .saturating_add((34_808_000 as Weight).saturating_mul(s as Weight)) + (5_904_642_000 as Weight) + // Standard Error: 393_000 + .saturating_add((34_810_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn 
payout_stakers_dead_controller(n: u32, ) -> Weight { - (137_975_000 as Weight) - // Standard Error: 20_000 - .saturating_add((54_061_000 as Weight).saturating_mul(n as Weight)) + (131_368_000 as Weight) + // Standard Error: 17_000 + .saturating_add((52_611_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (163_885_000 as Weight) - // Standard Error: 20_000 - .saturating_add((68_096_000 as Weight).saturating_mul(n as Weight)) + (165_079_000 as Weight) + // Standard Error: 27_000 + .saturating_add((66_740_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (37_847_000 as Weight) - // Standard Error: 1_000 - .saturating_add((89_000 as Weight).saturating_mul(l as Weight)) + (37_039_000 as Weight) + // Standard Error: 2_000 + .saturating_add((93_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 69_000 - .saturating_add((34_413_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 71_000 + .saturating_add((34_403_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as 
Weight))) } fn reap_stash(s: u32, ) -> Weight { - (69_257_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_819_000 as Weight).saturating_mul(s as Weight)) + (67_561_000 as Weight) + // Standard Error: 0 + .saturating_add((2_766_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_013_000 - .saturating_add((382_529_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 50_000 - .saturating_add((63_170_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_016_000 + .saturating_add((389_979_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((63_208_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -233,12 +233,12 @@ impl WeightInfo for SubstrateWeight { } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 90_000 - .saturating_add((27_108_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 90_000 - .saturating_add((29_962_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 1_228_000 - .saturating_add((26_080_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 95_000 + .saturating_add((26_419_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 95_000 + .saturating_add((29_033_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_305_000 + .saturating_add((23_680_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as 
Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -247,7 +247,7 @@ impl WeightInfo for SubstrateWeight { fn get_npos_targets(v: u32, ) -> Weight { (0 as Weight) // Standard Error: 32_000 - .saturating_add((11_220_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((11_317_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } @@ -256,155 +256,155 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (82_121_000 as Weight) + (79_895_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (61_899_000 as Weight) + (60_561_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (56_392_000 as Weight) + (54_996_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (57_382_000 as Weight) + (56_056_000 as Weight) // Standard Error: 0 - .saturating_add((70_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((67_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (92_185_000 as Weight) + (90_267_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_844_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) 
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (16_892_000 as Weight) + (16_345_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (27_411_000 as Weight) + (27_080_000 as Weight) // Standard Error: 14_000 - .saturating_add((19_272_000 as Weight).saturating_mul(k as Weight)) + .saturating_add((18_739_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (30_188_000 as Weight) - // Standard Error: 24_000 - .saturating_add((5_666_000 as Weight).saturating_mul(n as Weight)) + (29_101_000 as Weight) + // Standard Error: 23_000 + .saturating_add((5_670_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (15_870_000 as Weight) + (15_771_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_853_000 as Weight) + (13_329_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (30_291_000 as Weight) + (29_807_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_397_000 as Weight) + (2_323_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> 
Weight { - (2_627_000 as Weight) + (2_528_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_679_000 as Weight) + (2_529_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_643_000 as Weight) + (2_527_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_871_000 as Weight) + (2_661_000 as Weight) // Standard Error: 0 .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (65_876_000 as Weight) + (64_650_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_832_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_755_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_896_640_000 as Weight) - // Standard Error: 391_000 - .saturating_add((34_808_000 as Weight).saturating_mul(s as Weight)) + (5_904_642_000 as Weight) + // Standard Error: 393_000 + .saturating_add((34_810_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (137_975_000 as Weight) - // Standard Error: 20_000 - .saturating_add((54_061_000 as Weight).saturating_mul(n as Weight)) + (131_368_000 as Weight) + // Standard Error: 17_000 + .saturating_add((52_611_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) 
.saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (163_885_000 as Weight) - // Standard Error: 20_000 - .saturating_add((68_096_000 as Weight).saturating_mul(n as Weight)) + (165_079_000 as Weight) + // Standard Error: 27_000 + .saturating_add((66_740_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (37_847_000 as Weight) - // Standard Error: 1_000 - .saturating_add((89_000 as Weight).saturating_mul(l as Weight)) + (37_039_000 as Weight) + // Standard Error: 2_000 + .saturating_add((93_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 69_000 - .saturating_add((34_413_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 71_000 + .saturating_add((34_403_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (69_257_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_819_000 as Weight).saturating_mul(s as Weight)) + (67_561_000 as Weight) + // Standard Error: 0 + .saturating_add((2_766_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) 
.saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_013_000 - .saturating_add((382_529_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 50_000 - .saturating_add((63_170_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_016_000 + .saturating_add((389_979_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 51_000 + .saturating_add((63_208_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -413,12 +413,12 @@ impl WeightInfo for () { } fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 90_000 - .saturating_add((27_108_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 90_000 - .saturating_add((29_962_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 1_228_000 - .saturating_add((26_080_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 95_000 + .saturating_add((26_419_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 95_000 + .saturating_add((29_033_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_305_000 + .saturating_add((23_680_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) @@ -427,7 +427,7 @@ impl WeightInfo for () { fn get_npos_targets(v: u32, ) -> Weight { (0 as Weight) // Standard Error: 32_000 - .saturating_add((11_220_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((11_317_000 as Weight).saturating_mul(v as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index c1b841c30c6a3..a73dfaeb1d981 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } @@ -27,7 +26,6 @@ sp-core = { version = "3.0.0", path = "../../primitives/core" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-io/std", diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 53797d8cfc1dc..d840d45a7f430 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Sudo Module +//! # Sudo Pallet //! //! - [`Config`] //! - [`Call`] //! //! ## Overview //! -//! The Sudo module allows for a single account (called the "sudo key") +//! The Sudo pallet allows for a single account (called the "sudo key") //! to execute dispatchable functions that require a `Root` call //! or designate a new account to replace them as the sudo key. //! Only one account can be the sudo key at a time. @@ -31,7 +31,7 @@ //! //! ### Dispatchable Functions //! -//! Only the sudo key can call the dispatchable functions from the Sudo module. +//! Only the sudo key can call the dispatchable functions from the Sudo pallet. //! //! * `sudo` - Make a `Root` call to a dispatchable function. //! * `set_key` - Assign a new account to be the sudo key. @@ -40,8 +40,8 @@ //! //! 
### Executing Privileged Functions //! -//! The Sudo module itself is not intended to be used within other modules. -//! Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other modules. +//! The Sudo pallet itself is not intended to be used within other pallets. +//! Instead, you can build "privileged functions" (i.e. functions that require `Root` origin) in other pallets. //! You can execute these privileged functions by calling `sudo` with the sudo key account. //! Privileged functions cannot be directly executed via an extrinsic. //! @@ -49,35 +49,46 @@ //! //! ### Simple Code Snippet //! -//! This is an example of a module that exposes a privileged function: +//! This is an example of a pallet that exposes a privileged function: //! //! ``` -//! use frame_support::{decl_module, dispatch}; -//! use frame_system::ensure_root; //! -//! pub trait Config: frame_system::Config {} +//! #[frame_support::pallet] +//! pub mod logger { +//! use frame_support::pallet_prelude::*; +//! use frame_system::pallet_prelude::*; +//! use super::*; //! -//! decl_module! { -//! pub struct Module for enum Call where origin: T::Origin { -//! #[weight = 0] -//! pub fn privileged_function(origin) -> dispatch::DispatchResult { +//! #[pallet::config] +//! pub trait Config: frame_system::Config {} +//! +//! #[pallet::pallet] +//! pub struct Pallet(PhantomData); +//! +//! #[pallet::hooks] +//! impl Hooks> for Pallet {} +//! +//! #[pallet::call] +//! impl Pallet { +//! #[pallet::weight(0)] +//! pub fn privileged_function(origin: OriginFor) -> DispatchResultWithPostInfo { //! ensure_root(origin)?; //! //! // do something... //! -//! Ok(()) +//! Ok(().into()) //! } -//! } +//! } //! } //! # fn main() {} //! ``` //! //! ## Genesis Config //! -//! The Sudo module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The Sudo pallet depends on the [`GenesisConfig`]. //! You need to set an initial superuser account as the sudo `key`. //! -//! 
## Related Modules +//! ## Related Pallets //! //! * [Democracy](../pallet_democracy/index.html) //! @@ -89,35 +100,41 @@ use sp_std::prelude::*; use sp_runtime::{DispatchResult, traits::StaticLookup}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, + weights::GetDispatchInfo, + traits::UnfilteredDispatchable, }; -use frame_support::{ - weights::{Weight, GetDispatchInfo, Pays}, - traits::{UnfilteredDispatchable, Get}, - dispatch::DispatchResultWithPostInfo, -}; -use frame_system::ensure_signed; #[cfg(test)] mod mock; #[cfg(test)] mod tests; -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub use pallet::*; - /// A sudo-able call. - type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; -} +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::{*, DispatchResult}; -decl_module! { - /// Sudo module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; - fn deposit_event() = default; + /// A sudo-able call. + type Call: Parameter + UnfilteredDispatchable + GetDispatchInfo; + } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { /// Authenticates the sudo key and dispatches a function call with `Root` origin. /// /// The dispatch origin for this call must be _Signed_. @@ -128,17 +145,20 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. 
/// # - #[weight = { + #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); (dispatch_info.weight.saturating_add(10_000), dispatch_info.class) - }] - fn sudo(origin, call: Box<::Call>) -> DispatchResultWithPostInfo { + })] + pub(crate) fn sudo( + origin: OriginFor, + call: Box<::Call>, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -153,14 +173,18 @@ decl_module! { /// - O(1). /// - The weight of this call is defined by the caller. /// # - #[weight = (*_weight, call.get_dispatch_info().class)] - fn sudo_unchecked_weight(origin, call: Box<::Call>, _weight: Weight) -> DispatchResultWithPostInfo { + #[pallet::weight((*_weight, call.get_dispatch_info().class))] + pub(crate) fn sudo_unchecked_weight( + origin: OriginFor, + call: Box<::Call>, + _weight: Weight, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Root.into()); - Self::deposit_event(RawEvent::Sudid(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::Sudid(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } @@ -174,14 +198,17 @@ decl_module! { /// - Limited storage reads. /// - One DB change. 
/// # - #[weight = 0] - fn set_key(origin, new: ::Source) -> DispatchResultWithPostInfo { + #[pallet::weight(0)] + pub(crate) fn set_key( + origin: OriginFor, + new: ::Source, + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; ensure!(sender == Self::key(), Error::::RequireSudo); let new = T::Lookup::lookup(new)?; - Self::deposit_event(RawEvent::KeyChanged(Self::key())); + Self::deposit_event(Event::KeyChanged(Self::key())); >::put(new); // Sudo user does not pay a fee. Ok(Pays::No.into()) @@ -198,7 +225,7 @@ decl_module! { /// - One DB write (event). /// - Weight of derivative `call` execution + 10,000. /// # - #[weight = { + #[pallet::weight({ let dispatch_info = call.get_dispatch_info(); ( dispatch_info.weight @@ -207,8 +234,9 @@ decl_module! { .saturating_add(T::DbWeight::get().reads_writes(1, 1)), dispatch_info.class, ) - }] - fn sudo_as(origin, + })] + pub(crate) fn sudo_as( + origin: OriginFor, who: ::Source, call: Box<::Call> ) -> DispatchResultWithPostInfo { @@ -220,35 +248,55 @@ decl_module! { let res = call.dispatch_bypass_filter(frame_system::RawOrigin::Signed(who).into()); - Self::deposit_event(RawEvent::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); + Self::deposit_event(Event::SudoAsDone(res.map(|_| ()).map_err(|e| e.error))); // Sudo user does not pay a fee. Ok(Pays::No.into()) } } -} -decl_event!( - pub enum Event where AccountId = ::AccountId { + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId")] + pub enum Event { /// A sudo just took place. \[result\] Sudid(DispatchResult), /// The \[sudoer\] just switched identity; the old key is supplied. - KeyChanged(AccountId), + KeyChanged(T::AccountId), /// A sudo just took place. \[result\] SudoAsDone(DispatchResult), } -); -decl_storage! 
{ - trait Store for Module as Sudo { + #[pallet::error] + /// Error for the Sudo pallet + pub enum Error { + /// Sender must be the Sudo account + RequireSudo, + } + + /// The `AccountId` of the sudo key. + #[pallet::storage] + #[pallet::getter(fn key)] + pub(super) type Key = StorageValue<_, T::AccountId, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { /// The `AccountId` of the sudo key. - Key get(fn key) config(): T::AccountId; + pub key: T::AccountId, } -} -decl_error! { - /// Error for the Sudo module - pub enum Error for Module { - /// Sender must be the Sudo account - RequireSudo, + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + key: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.key); + } } } diff --git a/frame/sudo/src/mock.rs b/frame/sudo/src/mock.rs index cd242d491dae2..568799e1fe632 100644 --- a/frame/sudo/src/mock.rs +++ b/frame/sudo/src/mock.rs @@ -18,7 +18,7 @@ //! Test utilities use super::*; -use frame_support::{parameter_types, weights::Weight}; +use frame_support::{parameter_types, traits::GenesisBuild}; use sp_core::H256; use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; use sp_io; @@ -27,52 +27,80 @@ use frame_support::traits::Filter; use frame_system::limits; // Logger module to track execution. +#[frame_support::pallet] pub mod logger { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; use super::*; - use frame_system::ensure_root; + #[pallet::config] pub trait Config: frame_system::Config { - type Event: From> + Into<::Event>; + type Event: From> + IsType<::Event>; } - decl_storage! 
{ - trait Store for Module as Logger { - AccountLog get(fn account_log): Vec; - I32Log get(fn i32_log): Vec; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet { + #[pallet::weight(*weight)] + pub(crate) fn privileged_i32_log( + origin: OriginFor, + i: i32, + weight: Weight + ) -> DispatchResultWithPostInfo { + // Ensure that the `origin` is `Root`. + ensure_root(origin)?; + >::append(i); + Self::deposit_event(Event::AppendI32(i, weight)); + Ok(().into()) } - } - decl_event! { - pub enum Event where AccountId = ::AccountId { - AppendI32(i32, Weight), - AppendI32AndAccount(AccountId, i32, Weight), + #[pallet::weight(*weight)] + pub(crate) fn non_privileged_log( + origin: OriginFor, + i: i32, + weight: Weight + ) -> DispatchResultWithPostInfo { + // Ensure that the `origin` is some signed account. + let sender = ensure_signed(origin)?; + >::append(i); + >::append(sender.clone()); + Self::deposit_event(Event::AppendI32AndAccount(sender, i, weight)); + Ok(().into()) } } - decl_module! { - pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - - #[weight = *weight] - fn privileged_i32_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is `Root`. - ensure_root(origin)?; - ::append(i); - Self::deposit_event(RawEvent::AppendI32(i, weight)); - } - - #[weight = *weight] - fn non_privileged_log(origin, i: i32, weight: Weight){ - // Ensure that the `origin` is some signed account. 
- let sender = ensure_signed(origin)?; - ::append(i); - >::append(sender.clone()); - Self::deposit_event(RawEvent::AppendI32AndAccount(sender, i, weight)); - } - } + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId")] + pub enum Event { + AppendI32(i32, Weight), + AppendI32AndAccount(T::AccountId, i32, Weight), } + + #[pallet::storage] + #[pallet::getter(fn account_log)] + pub(super) type AccountLog = StorageValue< + _, + Vec, + ValueQuery + >; + + #[pallet::storage] + #[pallet::getter(fn i32_log)] + pub(super) type I32Log = StorageValue< + _, + Vec, + ValueQuery + >; } + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -123,6 +151,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } // Implement the logger module's `Config` on the Test runtime. diff --git a/frame/sudo/src/tests.rs b/frame/sudo/src/tests.rs index 4d2552b7b88b4..780e07676b29c 100644 --- a/frame/sudo/src/tests.rs +++ b/frame/sudo/src/tests.rs @@ -58,7 +58,7 @@ fn sudo_emits_events_correctly() { // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo(Origin::signed(1), call)); - let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); + let expected_event = TestEvent::sudo(Event::Sudid(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }) } @@ -97,7 +97,7 @@ fn sudo_unchecked_weight_emits_events_correctly() { // Should emit event to indicate success when called with the root `key` and `call` is `Ok`. 
let call = Box::new(Call::Logger(LoggerCall::privileged_i32_log(42, 1))); assert_ok!(Sudo::sudo_unchecked_weight(Origin::signed(1), call, 1_000)); - let expected_event = TestEvent::sudo(RawEvent::Sudid(Ok(()))); + let expected_event = TestEvent::sudo(Event::Sudid(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }) } @@ -124,11 +124,11 @@ fn set_key_emits_events_correctly() { // A root `key` can change the root `key`. assert_ok!(Sudo::set_key(Origin::signed(1), 2)); - let expected_event = TestEvent::sudo(RawEvent::KeyChanged(1)); + let expected_event = TestEvent::sudo(Event::KeyChanged(1)); assert!(System::events().iter().any(|a| a.event == expected_event)); // Double check. assert_ok!(Sudo::set_key(Origin::signed(2), 4)); - let expected_event = TestEvent::sudo(RawEvent::KeyChanged(2)); + let expected_event = TestEvent::sudo(Event::KeyChanged(2)); assert!(System::events().iter().any(|a| a.event == expected_event)); }); } @@ -164,7 +164,7 @@ fn sudo_as_emits_events_correctly() { // A non-privileged function will work when passed to `sudo_as` with the root `key`. 
let call = Box::new(Call::Logger(LoggerCall::non_privileged_log(42, 1))); assert_ok!(Sudo::sudo_as(Origin::signed(1), 2, call)); - let expected_event = TestEvent::sudo(RawEvent::SudoAsDone(Ok(()))); + let expected_event = TestEvent::sudo(Event::SudoAsDone(Ok(()))); assert!(System::events().iter().any(|a| a.event == expected_event)); }); } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 0951dbdea987d..e14f90197f060 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -167,6 +167,7 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result( } fn decl_outer_inherent<'a>( + runtime: &'a Ident, block: &'a syn::TypePath, unchecked_extrinsic: &'a syn::TypePath, pallet_declarations: impl Iterator, @@ -251,7 +253,8 @@ fn decl_outer_inherent<'a>( #scrate::impl_outer_inherent!( impl Inherents where Block = #block, - UncheckedExtrinsic = #unchecked_extrinsic + UncheckedExtrinsic = #unchecked_extrinsic, + Runtime = #runtime, { #(#pallets_tokens)* } diff --git a/frame/support/procedural/src/default_no_bound.rs b/frame/support/procedural/src/default_no_bound.rs new file mode 100644 index 0000000000000..ed35e057f0377 --- /dev/null +++ b/frame/support/procedural/src/default_no_bound.rs @@ -0,0 +1,98 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use syn::spanned::Spanned; + +/// Derive Clone but do not bound any generic. +pub fn derive_default_no_bound(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + let input: syn::DeriveInput = match syn::parse(input) { + Ok(input) => input, + Err(e) => return e.to_compile_error().into(), + }; + + let name = &input.ident; + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + + let impl_ = match input.data { + syn::Data::Struct(struct_) => match struct_.fields { + syn::Fields::Named(named) => { + let fields = named.named.iter() + .map(|i| &i.ident) + .map(|i| quote::quote_spanned!(i.span() => + #i: core::default::Default::default() + )); + + quote::quote!( Self { #( #fields, )* } ) + }, + syn::Fields::Unnamed(unnamed) => { + let fields = unnamed.unnamed.iter().enumerate() + .map(|(i, _)| syn::Index::from(i)) + .map(|i| quote::quote_spanned!(i.span() => + core::default::Default::default() + )); + + quote::quote!( Self ( #( #fields, )* ) ) + }, + syn::Fields::Unit => { + quote::quote!( Self ) + } + }, + syn::Data::Enum(enum_) => { + if let Some(first_variant) = enum_.variants.first() { + let variant_ident = &first_variant.ident; + match &first_variant.fields { + syn::Fields::Named(named) => { + let fields = named.named.iter() + .map(|i| &i.ident) + .map(|i| quote::quote_spanned!(i.span() => + #i: core::default::Default::default() + )); + + quote::quote!( #name :: #ty_generics :: #variant_ident { #( #fields, )* } ) + }, + syn::Fields::Unnamed(unnamed) => { + let fields = unnamed.unnamed.iter().enumerate() + .map(|(i, _)| syn::Index::from(i)) + .map(|i| quote::quote_spanned!(i.span() => + core::default::Default::default() + )); + + quote::quote!( #name :: #ty_generics :: #variant_ident ( #( #fields, )* ) ) + }, + syn::Fields::Unit => quote::quote!( #name :: #ty_generics :: #variant_ident ), + } + } else { + quote::quote!( 
Self ) + } + + }, + syn::Data::Union(_) => { + let msg = "Union type not supported by `derive(CloneNoBound)`"; + return syn::Error::new(input.span(), msg).to_compile_error().into() + }, + }; + + quote::quote!( + const _: () = { + impl #impl_generics core::default::Default for #name #ty_generics #where_clause { + fn default() -> Self { + #impl_ + } + } + }; + ).into() +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 2aecc5b993928..4cedf798821a9 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -27,6 +27,7 @@ mod transactional; mod debug_no_bound; mod clone_no_bound; mod partial_eq_no_bound; +mod default_no_bound; pub(crate) use storage::INHERENT_INSTANCE_NAME; use proc_macro::TokenStream; @@ -412,6 +413,12 @@ pub fn derive_eq_no_bound(input: TokenStream) -> TokenStream { ).into() } +/// derive `Default` but do no bound any generic. Docs are at `frame_support::DefaultNoBound`. +#[proc_macro_derive(DefaultNoBound)] +pub fn derive_default_no_bound(input: TokenStream) -> TokenStream { + default_no_bound::derive_default_no_bound(input) +} + #[proc_macro_attribute] pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStream { transactional::require_transactional(attr, input).unwrap_or_else(|e| e.to_compile_error().into()) @@ -421,3 +428,7 @@ pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStre pub fn crate_to_pallet_version(input: TokenStream) -> TokenStream { pallet_version::crate_to_pallet_version(input).unwrap_or_else(|e| e.to_compile_error()).into() } + +/// The number of module instances supported by the runtime, starting at index 1, +/// and up to `NUMBER_OF_INSTANCE`. 
+pub(crate) const NUMBER_OF_INSTANCE: u8 = 16; diff --git a/frame/support/procedural/src/pallet/expand/call.rs b/frame/support/procedural/src/pallet/expand/call.rs index 295cf14d37f04..301d3fc5d9fa8 100644 --- a/frame/support/procedural/src/pallet/expand/call.rs +++ b/frame/support/procedural/src/pallet/expand/call.rs @@ -108,26 +108,26 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { match *self { #( Self::#fn_name ( #( ref #args_name, )* ) => { - let base_weight = #fn_weight; + let __pallet_base_weight = #fn_weight; - let weight = < + let __pallet_weight = < dyn #frame_support::dispatch::WeighData<( #( & #args_type, )* )> - >::weigh_data(&base_weight, ( #( #args_name, )* )); + >::weigh_data(&__pallet_base_weight, ( #( #args_name, )* )); - let class = < + let __pallet_class = < dyn #frame_support::dispatch::ClassifyDispatch< ( #( & #args_type, )* ) > - >::classify_dispatch(&base_weight, ( #( #args_name, )* )); + >::classify_dispatch(&__pallet_base_weight, ( #( #args_name, )* )); - let pays_fee = < + let __pallet_pays_fee = < dyn #frame_support::dispatch::PaysFee<( #( & #args_type, )* )> - >::pays_fee(&base_weight, ( #( #args_name, )* )); + >::pays_fee(&__pallet_base_weight, ( #( #args_name, )* )); #frame_support::dispatch::DispatchInfo { - weight, - class, - pays_fee, + weight: __pallet_weight, + class: __pallet_class, + pays_fee: __pallet_pays_fee, } }, )* @@ -186,6 +186,7 @@ pub fn expand_call(def: &mut Def) -> proc_macro2::TokenStream { impl<#type_impl_gen> #pallet_ident<#type_use_gen> #where_clause { #[doc(hidden)] + #[allow(dead_code)] pub fn call_functions() -> &'static [#frame_support::dispatch::FunctionMetadata] { &[ #( #frame_support::dispatch::FunctionMetadata { diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs index cc35451b646f3..23ccdfa5ddc9a 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ 
b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -29,6 +29,8 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { let genesis_config_item = &mut def.item.content.as_mut() .expect("Checked by def parser").1[genesis_config.index]; + let serde_crate = format!("{}::serde", frame_support); + match genesis_config_item { syn::Item::Enum(syn::ItemEnum { attrs, ..}) | syn::Item::Struct(syn::ItemStruct { attrs, .. }) | @@ -50,6 +52,7 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { attrs.push(syn::parse_quote!( #[serde(deny_unknown_fields)] )); attrs.push(syn::parse_quote!( #[serde(bound(serialize = ""))] )); attrs.push(syn::parse_quote!( #[serde(bound(deserialize = ""))] )); + attrs.push(syn::parse_quote!( #[serde(crate = #serde_crate)] )); }, _ => unreachable!("Checked by genesis_config parser"), } diff --git a/frame/support/procedural/src/pallet/expand/instances.rs b/frame/support/procedural/src/pallet/expand/instances.rs index c60cd5ebe8d81..9f48563ab7e6c 100644 --- a/frame/support/procedural/src/pallet/expand/instances.rs +++ b/frame/support/procedural/src/pallet/expand/instances.rs @@ -17,14 +17,15 @@ use proc_macro2::Span; use crate::pallet::Def; +use crate::NUMBER_OF_INSTANCE; /// * Provide inherent instance to be used by construct_runtime -/// * Provide Instance0 .. 
Instance16 for instantiable pallet +/// * Provide Instance1 ..= Instance16 for instantiable pallet pub fn expand_instances(def: &mut Def) -> proc_macro2::TokenStream { let frame_support = &def.frame_support; let inherent_ident = syn::Ident::new(crate::INHERENT_INSTANCE_NAME, Span::call_site()); let instances = if def.config.has_instance { - (0..16).map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site())).collect() + (1..=NUMBER_OF_INSTANCE).map(|i| syn::Ident::new(&format!("Instance{}", i), Span::call_site())).collect() } else { vec![] }; diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index fd3230edd1e74..556c6515d4706 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -23,6 +23,7 @@ use crate::pallet::{Def, parse::helper::get_doc_literals}; /// * Implement ModuleErrorMetadata on Pallet /// * declare Module type alias for construct_runtime /// * replace the first field type of `struct Pallet` with `PhantomData` if it is `_` +/// * implementation of `PalletInfoAccess` information pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { let frame_support = &def.frame_support; let frame_system = &def.frame_system; @@ -134,5 +135,27 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { .put_into_storage::<::PalletInfo, Self>(); } } + + // Implement `PalletInfoAccess` for `Pallet` + impl<#type_impl_gen> #frame_support::traits::PalletInfoAccess + for #pallet_ident<#type_use_gen> + #config_where_clause + { + fn index() -> usize { + < + ::PalletInfo as #frame_support::traits::PalletInfo + >::index::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn name() -> &'static str { + < + ::PalletInfo as #frame_support::traits::PalletInfo + >::name::() + .expect("Pallet is part of the 
runtime because pallet `Config` trait is \ + implemented by the runtime") + } + } ) } diff --git a/frame/support/procedural/src/pallet/parse/config.rs b/frame/support/procedural/src/pallet/parse/config.rs index 045f2bff50e45..79d4680752b90 100644 --- a/frame/support/procedural/src/pallet/parse/config.rs +++ b/frame/support/procedural/src/pallet/parse/config.rs @@ -126,6 +126,10 @@ impl syn::parse::Parse for ConfigBoundParse { input.parse::()?; input.parse::()?; + if input.peek(syn::token::Lt) { + input.parse::()?; + } + Ok(Self(ident)) } } diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index 87dfabcefbaaa..6dfa5a13fe5b2 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -65,6 +65,7 @@ fn decl_genesis_config_and_impl_default( let genesis_struct = &genesis_config.genesis_struct; let genesis_impl = &genesis_config.genesis_impl; let genesis_where_clause = &genesis_config.genesis_where_clause; + let serde_crate = format!("{}::serde", scrate); quote!( /// Genesis config for the module, allow to build genesis storage. 
@@ -72,6 +73,7 @@ fn decl_genesis_config_and_impl_default( #[cfg(feature = "std")] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] + #[serde(crate = #serde_crate)] #serde_bug_bound pub struct GenesisConfig#genesis_struct_decl #genesis_where_clause { #( #config_fields )* diff --git a/frame/support/procedural/src/storage/instance_trait.rs b/frame/support/procedural/src/storage/instance_trait.rs index 5468c3d344193..a9e06c6299041 100644 --- a/frame/support/procedural/src/storage/instance_trait.rs +++ b/frame/support/procedural/src/storage/instance_trait.rs @@ -21,8 +21,8 @@ use proc_macro2::{TokenStream, Span}; use quote::quote; use super::DeclStorageDefExt; +use crate::NUMBER_OF_INSTANCE; -const NUMBER_OF_INSTANCE: usize = 16; pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; // Used to generate an instance implementation. @@ -30,6 +30,8 @@ struct InstanceDef { prefix: String, instance_struct: syn::Ident, doc: TokenStream, + // Index is same as instance number. Default is 0. + index: u8, } pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { @@ -39,13 +41,14 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre // Implementation of instances. if let Some(module_instance) = &def.module_instance { - let instance_defs = (0..NUMBER_OF_INSTANCE) + let instance_defs = (1..=NUMBER_OF_INSTANCE) .map(|i| { let name = format!("Instance{}", i); InstanceDef { instance_struct: syn::Ident::new(&name, proc_macro2::Span::call_site()), prefix: name, doc: quote!(#[doc=r"Module instance"]), + index: i, } }) .chain( @@ -53,6 +56,7 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre prefix: String::new(), instance_struct: ident.clone(), doc: quote!(#[doc=r"Default module instance"]), + index: 0, }) ); @@ -83,6 +87,8 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre /// instance. 
#[doc(hidden)] ), + // This is just to make the type system happy. Not actually used. + index: 0, }; impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); } @@ -116,6 +122,7 @@ fn create_and_impl_instance_struct( let instance_struct = &instance_def.instance_struct; let prefix = format!("{}{}", instance_def.prefix, def.crate_name.to_string()); let doc = &instance_def.doc; + let index = instance_def.index; quote! { // Those trait are derived because of wrong bounds for generics @@ -129,6 +136,7 @@ fn create_and_impl_instance_struct( pub struct #instance_struct; impl #instance_trait for #instance_struct { const PREFIX: &'static str = #prefix; + const INDEX: u8 = #index; } } } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index aede0404da19c..d6f133a8d20a3 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -1967,23 +1967,23 @@ macro_rules! decl_module { match *self { $( $call_type::$fn_name( $( ref $param_name ),* ) => { - let base_weight = $weight; - let weight = >::weigh_data( - &base_weight, + let __pallet_base_weight = $weight; + let __pallet_weight = >::weigh_data( + &__pallet_base_weight, ($( $param_name, )*) ); - let class = >::classify_dispatch( - &base_weight, + let __pallet_class = >::classify_dispatch( + &__pallet_base_weight, ($( $param_name, )*) ); - let pays_fee = >::pays_fee( - &base_weight, + let __pallet_pays_fee = >::pays_fee( + &__pallet_base_weight, ($( $param_name, )*) ); $crate::dispatch::DispatchInfo { - weight, - class, - pays_fee, + weight: __pallet_weight, + class: __pallet_class, + pays_fee: __pallet_pays_fee, } }, )* diff --git a/frame/support/src/genesis_config.rs b/frame/support/src/genesis_config.rs index 3f7f943603e42..e6ba86f9fe922 100644 --- a/frame/support/src/genesis_config.rs +++ b/frame/support/src/genesis_config.rs @@ -76,10 +76,13 @@ macro_rules! impl_outer_config { } $crate::paste::item! 
{ + #[cfg(any(feature = "std", test))] + use $crate::serde as __genesis_config_serde_import__; #[cfg(any(feature = "std", test))] #[derive($crate::serde::Serialize, $crate::serde::Deserialize, Default)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] + #[serde(crate = "__genesis_config_serde_import__")] pub struct $main { $( pub [< $snake $(_ $instance )? >]: $config, diff --git a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index 3c201dff29c22..87e489bd8f4db 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -20,8 +20,10 @@ pub use crate::sp_std::vec::Vec; #[doc(hidden)] pub use crate::sp_runtime::traits::{Block as BlockT, Extrinsic}; #[doc(hidden)] -pub use sp_inherents::{InherentData, ProvideInherent, CheckInherentsResult, IsFatalError}; - +pub use sp_inherents::{ + InherentData, ProvideInherent, CheckInherentsResult, IsFatalError, InherentIdentifier, + MakeFatalError, +}; /// Implement the outer inherent. /// All given modules need to implement `ProvideInherent`. @@ -30,7 +32,11 @@ pub use sp_inherents::{InherentData, ProvideInherent, CheckInherentsResult, IsFa /// /// ```nocompile /// impl_outer_inherent! { -/// impl Inherents where Block = Block, UncheckedExtrinsic = UncheckedExtrinsic { +/// impl Inherents where +/// Block = Block, +/// UncheckedExtrinsic = UncheckedExtrinsic, +/// Runtime = Runtime, +/// { /// timestamp, /// consensus, /// aura, @@ -42,7 +48,8 @@ macro_rules! impl_outer_inherent { ( impl Inherents where Block = $block:ident, - UncheckedExtrinsic = $uncheckedextrinsic:ident + UncheckedExtrinsic = $uncheckedextrinsic:ident, + Runtime = $runtime:ident, { $( $module:ident, )* } @@ -56,16 +63,19 @@ macro_rules! 
impl_outer_inherent { impl InherentDataExt for $crate::inherent::InherentData { fn create_extrinsics(&self) -> $crate::inherent::Vec<<$block as $crate::inherent::BlockT>::Extrinsic> { - use $crate::inherent::{ProvideInherent, Extrinsic}; + use $crate::inherent::ProvideInherent; let mut inherents = Vec::new(); $( if let Some(inherent) = $module::create_inherent(self) { - inherents.push($uncheckedextrinsic::new( + let inherent = <$uncheckedextrinsic as $crate::inherent::Extrinsic>::new( inherent.into(), None, - ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return `Some`; qed")); + ).expect("Runtime UncheckedExtrinsic is not Opaque, so it has to return \ + `Some`; qed"); + + inherents.push(inherent); } )* @@ -74,41 +84,64 @@ macro_rules! impl_outer_inherent { fn check_extrinsics(&self, block: &$block) -> $crate::inherent::CheckInherentsResult { use $crate::inherent::{ProvideInherent, IsFatalError}; - use $crate::traits::IsSubType; + use $crate::traits::{IsSubType, ExtrinsicCall}; use $crate::sp_runtime::traits::Block as _; let mut result = $crate::inherent::CheckInherentsResult::new(); + for xt in block.extrinsics() { + // Inherents are before any other extrinsics. + // And signed extrinsics are not inherents. 
if $crate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { break } + let mut is_inherent = false; + $({ - if let Some(call) = IsSubType::<_>::is_sub_type(&xt.function) { - if let Err(e) = $module::check_inherent(call, self) { - result.put_error( - $module::INHERENT_IDENTIFIER, &e - ).expect("There is only one fatal error; qed"); - if e.is_fatal_error() { - return result + let call = <$uncheckedextrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if $module::is_inherent(call) { + is_inherent = true; + if let Err(e) = $module::check_inherent(call, self) { + result.put_error( + $module::INHERENT_IDENTIFIER, &e + ).expect("There is only one fatal error; qed"); + if e.is_fatal_error() { + return result + } } } } })* + + // Inherents are before any other extrinsics. + // No module marked it as inherent thus it is not. + if !is_inherent { + break + } } $( match $module::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - if $crate::inherent::Extrinsic::is_signed(xt).unwrap_or(false) { - return false + let is_signed = $crate::inherent::Extrinsic::is_signed(xt) + .unwrap_or(false); + + if !is_signed { + let call = < + $uncheckedextrinsic as ExtrinsicCall + >::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + $module::is_inherent(&call) + } else { + false + } + } else { + // Signed extrinsics are not inherents. + false } - - let call: Option<&<$module as ProvideInherent>::Call> = - xt.function.is_sub_type(); - - call.is_some() }); if !found { @@ -135,6 +168,46 @@ macro_rules! 
impl_outer_inherent { result } } + + impl $crate::traits::EnsureInherentsAreFirst<$block> for $runtime { + fn ensure_inherents_are_first(block: &$block) -> Result<(), u32> { + use $crate::inherent::ProvideInherent; + use $crate::traits::{IsSubType, ExtrinsicCall}; + use $crate::sp_runtime::traits::Block as _; + + let mut first_signed_observed = false; + + for (i, xt) in block.extrinsics().iter().enumerate() { + let is_signed = $crate::inherent::Extrinsic::is_signed(xt).unwrap_or(false); + + let is_inherent = if is_signed { + // Signed extrinsics are not inherents. + false + } else { + let mut is_inherent = false; + $({ + let call = <$uncheckedextrinsic as ExtrinsicCall>::call(xt); + if let Some(call) = IsSubType::<_>::is_sub_type(call) { + if $module::is_inherent(&call) { + is_inherent = true; + } + } + })* + is_inherent + }; + + if !is_inherent { + first_signed_observed = true; + } + + if first_signed_observed && is_inherent { + return Err(i as u32) + } + } + + Ok(()) + } + } }; } @@ -142,7 +215,6 @@ macro_rules! 
impl_outer_inherent { mod tests { use super::*; use sp_runtime::{traits, testing::{Header, self}}; - use crate::traits::IsSubType; #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] enum Call { @@ -162,7 +234,7 @@ mod tests { } } - impl IsSubType for Call { + impl crate::traits::IsSubType for Call { fn is_sub_type(&self) -> Option<&CallTest> { match self { Self::Test(test) => Some(test), @@ -171,7 +243,7 @@ mod tests { } } - impl IsSubType for Call { + impl crate::traits::IsSubType for Call { fn is_sub_type(&self) -> Option<&CallTest2> { match self { Self::Test2(test) => Some(test), @@ -182,13 +254,13 @@ mod tests { #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] enum CallTest { - Something, - SomethingElse, + OptionalInherent(bool), + NotInherent, } #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] enum CallTest2 { - Something, + RequiredInherent, } struct ModuleTest; @@ -198,15 +270,20 @@ mod tests { const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"test1235"; fn create_inherent(_: &InherentData) -> Option { - Some(CallTest::Something) + Some(CallTest::OptionalInherent(true)) } fn check_inherent(call: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { match call { - CallTest::Something => Ok(()), - CallTest::SomethingElse => Err(().into()), + CallTest::OptionalInherent(true) => Ok(()), + CallTest::OptionalInherent(false) => Err(().into()), + _ => unreachable!("other calls are not inherents"), } } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, CallTest::OptionalInherent(_)) + } } struct ModuleTest2; @@ -216,18 +293,23 @@ mod tests { const INHERENT_IDENTIFIER: sp_inherents::InherentIdentifier = *b"test1234"; fn create_inherent(_: &InherentData) -> Option { - Some(CallTest2::Something) + Some(CallTest2::RequiredInherent) } - fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { + fn 
is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(Some(().into())) } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, CallTest2::RequiredInherent) + } } type Block = testing::Block; #[derive(codec::Encode, codec::Decode, Clone, PartialEq, Eq, Debug, serde::Serialize)] struct Extrinsic { + signed: bool, function: Call, } @@ -235,15 +317,34 @@ mod tests { type Call = Call; type SignaturePayload = (); - fn new(function: Call, _: Option<()>) -> Option { - Some(Self { function }) + fn new(function: Call, signed_data: Option<()>) -> Option { + Some(Self { + function, + signed: signed_data.is_some(), + }) + } + + fn is_signed(&self) -> Option { + Some(self.signed) + } + } + + impl crate::traits::ExtrinsicCall for Extrinsic { + fn call(&self) -> &Self::Call { + &self.function } } parity_util_mem::malloc_size_of_is_0!(Extrinsic); + struct Runtime; + impl_outer_inherent! { - impl Inherents where Block = Block, UncheckedExtrinsic = Extrinsic { + impl Inherents where + Block = Block, + UncheckedExtrinsic = Extrinsic, + Runtime = Runtime, + { ModuleTest, ModuleTest2, } @@ -254,8 +355,8 @@ mod tests { let inherents = InherentData::new().create_extrinsics(); let expected = vec![ - Extrinsic { function: Call::Test(CallTest::Something) }, - Extrinsic { function: Call::Test2(CallTest2::Something) }, + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: false }, + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, ]; assert_eq!(expected, inherents); } @@ -265,8 +366,8 @@ mod tests { let block = Block::new( Header::new_from_number(1), vec![ - Extrinsic { function: Call::Test2(CallTest2::Something) }, - Extrinsic { function: Call::Test(CallTest::Something) }, + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: false }, ], ); @@ -275,8 +376,8 @@ mod tests { let block = Block::new( 
Header::new_from_number(1), vec![ - Extrinsic { function: Call::Test2(CallTest2::Something) }, - Extrinsic { function: Call::Test(CallTest::SomethingElse) }, + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, + Extrinsic { function: Call::Test(CallTest::OptionalInherent(false)), signed: false }, ], ); @@ -287,9 +388,84 @@ mod tests { fn required_inherents_enforced() { let block = Block::new( Header::new_from_number(1), - vec![Extrinsic { function: Call::Test(CallTest::Something) }], + vec![ + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: false } + ], ); assert!(InherentData::new().check_extrinsics(&block).fatal_error()); } + + #[test] + fn signed_are_not_inherent() { + let block = Block::new( + Header::new_from_number(1), + vec![ + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, + // NOTE: checking this call would fail, but it is not checked as it is not an + // inherent, because it is signed. + Extrinsic { function: Call::Test(CallTest::OptionalInherent(false)), signed: true }, + ], + ); + + assert!(InherentData::new().check_extrinsics(&block).ok()); + + let block = Block::new( + Header::new_from_number(1), + vec![ + // NOTE: this is not considered an inherent, thus block is failing because of + // missing required inherent. 
+ Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: true }, + ], + ); + + assert_eq!( + InherentData::new().check_extrinsics(&block).into_errors().collect::>(), + vec![(*b"test1234", vec![])], + ); + } + + #[test] + fn inherent_first_works() { + use crate::traits::EnsureInherentsAreFirst; + let block = Block::new( + Header::new_from_number(1), + vec![ + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: false }, + Extrinsic { function: Call::Test(CallTest::NotInherent), signed: false }, + Extrinsic { function: Call::Test(CallTest::NotInherent), signed: false }, + ], + ); + + assert!(Runtime::ensure_inherents_are_first(&block).is_ok()); + } + + #[test] + fn inherent_cannot_be_placed_after_non_inherent() { + use crate::traits::EnsureInherentsAreFirst; + let block = Block::new( + Header::new_from_number(1), + vec![ + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, + Extrinsic { function: Call::Test(CallTest::NotInherent), signed: false }, + // This inherent is placed after non inherent: invalid + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: false }, + ], + ); + + assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); + + let block = Block::new( + Header::new_from_number(1), + vec![ + Extrinsic { function: Call::Test2(CallTest2::RequiredInherent), signed: false }, + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: true }, + // This inherent is placed after non inherent: invalid + Extrinsic { function: Call::Test(CallTest::OptionalInherent(true)), signed: false }, + ], + ); + + assert_eq!(Runtime::ensure_inherents_are_first(&block).err().unwrap(), 2); + } } diff --git a/frame/support/src/instances.rs b/frame/support/src/instances.rs index 086ed9a6cc175..9908d16076a08 100644 --- a/frame/support/src/instances.rs +++ 
b/frame/support/src/instances.rs @@ -31,10 +31,6 @@ //! NOTE: [`frame_support::pallet`] will reexport them inside the module, in order to make them //! accessible to [`frame_support::construct_runtime`]. -/// Instance0 to be used for instantiable pallet define with `pallet` macro. -#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] -pub struct Instance0; - /// Instance1 to be used for instantiable pallet define with `pallet` macro. #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance1; @@ -94,3 +90,7 @@ pub struct Instance14; /// Instance15 to be used for instantiable pallet define with `pallet` macro. #[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] pub struct Instance15; + +/// Instance16 to be used for instantiable pallet define with `pallet` macro. +#[derive(Clone, Copy, PartialEq, Eq, crate::RuntimeDebugNoBound)] +pub struct Instance16; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 220e7a06bdf31..72c90018f755d 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -75,11 +75,15 @@ pub use self::hash::{ }; pub use self::storage::{ StorageValue, StorageMap, StorageDoubleMap, StoragePrefixedMap, IterableStorageMap, - IterableStorageDoubleMap, migration + IterableStorageDoubleMap, migration, + bounded_vec::{self, BoundedVec}, }; pub use self::dispatch::{Parameter, Callable}; pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; +use codec::{Encode, Decode}; +use sp_runtime::TypeId; + /// A unified log target for support operations. pub const LOG_TARGET: &'static str = "runtime::frame-support"; @@ -87,6 +91,131 @@ pub const LOG_TARGET: &'static str = "runtime::frame-support"; #[derive(Debug, PartialEq, Eq, Clone)] pub enum Never {} +/// A pallet identifier. These are per pallet and should be stored in a registry somewhere. 
+#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] +pub struct PalletId(pub [u8; 8]); + +impl TypeId for PalletId { + const TYPE_ID: [u8; 4] = *b"modl"; +} + +/// Generate a new type alias for [`storage::types::StorageValue`], +/// [`storage::types::StorageMap`] and [`storage::types::StorageDoubleMap`]. +/// +/// Useful for creating a *storage-like* struct for test and migrations. +/// +///``` +/// # use frame_support::generate_storage_alias; +/// use frame_support::codec; +/// use frame_support::Twox64Concat; +/// // generate a storage value with type u32. +/// generate_storage_alias!(Prefix, StorageName => Value); +/// +/// // generate a double map from `(u32, u32)` (with hasher `Twox64Concat`) to `Vec` +/// generate_storage_alias!( +/// OtherPrefix, OtherStorageName => DoubleMap< +/// (u32, u32), +/// (u32, u32), +/// Vec +/// > +/// ); +/// +/// // generate a map from `Config::AccountId` (with hasher `Twox64Concat`) to `Vec` +/// trait Config { type AccountId: codec::FullCodec; } +/// generate_storage_alias!( +/// Prefix, GenericStorage => Map<(Twox64Concat, T::AccountId), Vec> +/// ); +/// # fn main() {} +/// ``` +#[macro_export] +macro_rules! generate_storage_alias { + // without generic for $name. + ($pallet:ident, $name:ident => Map<($key:ty, $hasher:ty), $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageMap< + [<$name Instance>], + $hasher, + $key, + $value, + >; + } + }; + ($pallet:ident, $name:ident => DoubleMap<($key1:ty, $hasher1:ty), ($key2:ty, $hasher2:ty), $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageDoubleMap< + [<$name Instance>], + $hasher1, + $key1, + $hasher2, + $key2, + $value, + >; + } + }; + ($pallet:ident, $name:ident => Value<$value:ty>) => { + $crate::paste::paste! 
{ + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + type $name = $crate::storage::types::StorageValue< + [<$name Instance>], + $value, + >; + } + }; + // with generic for $name. + ($pallet:ident, $name:ident<$t:ident : $bounds:tt> => Map<($key:ty, $hasher:ty), $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageMap< + [<$name Instance>], + $key, + $hasher, + $value, + >; + } + }; + ( + $pallet:ident, + $name:ident<$t:ident : $bounds:tt> + => DoubleMap<($key1:ty, $hasher1:ty), ($key2:ty, $hasher2:ty), $value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageDoubleMap< + [<$name Instance>], + $key1, + $hasher1, + $key2, + $hasher2, + $value, + >; + } + }; + ($pallet:ident, $name:ident<$t:ident : $bounds:tt> => Value<$value:ty>) => { + $crate::paste::paste! { + $crate::generate_storage_alias!(@GENERATE_INSTANCE_STRUCT $pallet, $name); + #[allow(type_alias_bounds)] + type $name<$t : $bounds> = $crate::storage::types::StorageValue< + [<$name Instance>], + $value, + $crate::storage::types::ValueQuery, + >; + } + }; + // helper used in all arms. + (@GENERATE_INSTANCE_STRUCT $pallet:ident, $name:ident) => { + $crate::paste::paste! { + struct [<$name Instance>]; + impl $crate::traits::StorageInstance for [<$name Instance>] { + fn pallet_prefix() -> &'static str { stringify!($pallet) } + const STORAGE_PREFIX: &'static str = stringify!($name); + } + } + }; +} + /// Create new implementations of the [`Get`](crate::traits::Get) trait. /// /// The so-called parameter type can be created in four different ways: @@ -250,21 +379,20 @@ macro_rules! 
parameter_types { } }; ( - $( - $( #[ $attr:meta ] )* - $vis:vis static $name:ident: $type:ty = $value:expr; - )* + $( #[ $attr:meta ] )* + $vis:vis static $name:ident: $type:ty = $value:expr; + $( $rest:tt )* ) => ( $crate::parameter_types_impl_thread_local!( - $( - $( #[ $attr ] )* - $vis static $name: $type = $value; - )* + $( #[ $attr ] )* + $vis static $name: $type = $value; ); + $crate::parameter_types!( $( $rest )* ); ); } #[cfg(not(feature = "std"))] +#[doc(inline)] #[macro_export] macro_rules! parameter_types_impl_thread_local { ( $( $any:tt )* ) => { @@ -273,6 +401,7 @@ macro_rules! parameter_types_impl_thread_local { } #[cfg(feature = "std")] +#[doc(inline)] #[macro_export] macro_rules! parameter_types_impl_thread_local { ( @@ -330,13 +459,16 @@ macro_rules! ord_parameter_types { ); () => (); (IMPL $name:ident , $type:ty , $value:expr) => { - impl $crate::traits::Contains<$type> for $name { + impl $crate::traits::SortedMembers<$type> for $name { fn contains(t: &$type) -> bool { &$value == t } fn sorted_members() -> $crate::sp_std::prelude::Vec<$type> { vec![$value] } fn count() -> usize { 1 } #[cfg(feature = "runtime-benchmarks")] fn add(_: &$type) {} } + impl $crate::traits::Contains<$type> for $name { + fn contains(t: &$type) -> bool { &$value == t } + } } } @@ -442,6 +574,25 @@ pub use frame_support_procedural::PartialEqNoBound; /// ``` pub use frame_support_procedural::DebugNoBound; +/// Derive [`Default`] but do not bound any generic. +/// +/// This is useful for type generic over runtime: +/// ``` +/// # use frame_support::DefaultNoBound; +/// # use core::default::Default; +/// trait Config { +/// type C: Default; +/// } +/// +/// // Foo implements [`Default`] because `C` bounds [`Default`]. +/// // Otherwise compilation will fail with an output telling `c` doesn't implement [`Default`]. 
+/// #[derive(DefaultNoBound)] +/// struct Foo { +/// c: T::C, +/// } +/// ``` +pub use frame_support_procedural::DefaultNoBound; + /// Assert the annotated function is executed within a storage transaction. /// /// The assertion is enabled for native execution and when `debug_assertions` are enabled. @@ -1066,6 +1217,12 @@ pub mod tests { assert_eq!(300, StorageParameter::get()); }) } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub static Members: Vec = vec![]; + pub const Foo: Option = None; + } } /// Prelude to be used alongside pallet macro, for ease of use. @@ -1077,7 +1234,7 @@ pub mod pallet_prelude { EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DebugNoBound, CloneNoBound, Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, ensure, RuntimeDebug, storage, - traits::{Get, Hooks, IsType, GetPalletVersion, EnsureOrigin}, + traits::{Get, Hooks, IsType, GetPalletVersion, EnsureOrigin, PalletInfoAccess}, dispatch::{DispatchResultWithPostInfo, Parameter, DispatchError, DispatchResult}, weights::{DispatchClass, Pays, Weight}, storage::types::{StorageValue, StorageMap, StorageDoubleMap, ValueQuery, OptionQuery}, @@ -1205,6 +1362,10 @@ pub mod pallet_prelude { /// /// It declare `type Module` type alias for `Pallet`, used by [`construct_runtime`]. /// +/// It implements [`traits::PalletInfoAccess`] on `Pallet` to ease access to pallet informations +/// given by [`frame_support::traits::PalletInfo`]. +/// (The implementation use the associated type `frame_system::Config::PalletInfo`). +/// /// If attribute generate_store then macro create the trait `Store` and implement it on `Pallet`. 
/// /// # Hooks: `#[pallet::hooks]` mandatory @@ -1782,6 +1943,10 @@ pub mod pallet_prelude { /// fn create_inherent(_data: &InherentData) -> Option { /// unimplemented!(); /// } +/// +/// fn is_inherent(_call: &Self::Call) -> bool { +/// unimplemented!(); +/// } /// } /// /// // Regular rust code needed for implementing ProvideInherent trait @@ -1909,6 +2074,10 @@ pub mod pallet_prelude { /// fn create_inherent(_data: &InherentData) -> Option { /// unimplemented!(); /// } +/// +/// fn is_inherent(_call: &Self::Call) -> bool { +/// unimplemented!(); +/// } /// } /// /// // Regular rust code needed for implementing ProvideInherent trait diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index 19b24fb84bb1a..6dd38eb1b2ab4 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -246,6 +246,16 @@ macro_rules! impl_outer_origin { &self.caller } + fn try_with_caller( + mut self, + f: impl FnOnce(Self::PalletsOrigin) -> Result, + ) -> Result { + match f(self.caller) { + Ok(r) => Ok(r), + Err(caller) => { self.caller = caller; Err(self) } + } + } + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self { $system::RawOrigin::None.into() @@ -299,6 +309,20 @@ macro_rules! impl_outer_origin { $caller_name::system(x) } } + + impl $crate::sp_std::convert::TryFrom<$caller_name> for $system::Origin<$runtime> { + type Error = $caller_name; + fn try_from(x: $caller_name) + -> $crate::sp_std::result::Result<$system::Origin<$runtime>, $caller_name> + { + if let $caller_name::system(l) = x { + Ok(l) + } else { + Err(x) + } + } + } + impl From<$system::Origin<$runtime>> for $name { /// Convert to runtime origin: /// * root origin is built with no filter @@ -376,6 +400,22 @@ macro_rules! impl_outer_origin { } } } + + impl $crate::sp_std::convert::TryFrom< + $caller_name + > for $module::Origin < $( $generic )? $(, $module::$generic_instance )? 
> { + type Error = $caller_name; + fn try_from(x: $caller_name) -> $crate::sp_std::result::Result< + $module::Origin < $( $generic )? $(, $module::$generic_instance )? >, + $caller_name, + > { + if let $caller_name::[< $module $( _ $generic_instance )? >](l) = x { + Ok(l) + } else { + Err(x) + } + } + } } )* } diff --git a/frame/support/src/storage/bounded_vec.rs b/frame/support/src/storage/bounded_vec.rs new file mode 100644 index 0000000000000..9fcfe4035294f --- /dev/null +++ b/frame/support/src/storage/bounded_vec.rs @@ -0,0 +1,470 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits, types and structs to support putting a bounded vector into storage, as a raw value, map +//! or a double map. + +use sp_std::prelude::*; +use sp_std::{convert::TryFrom, marker::PhantomData}; +use codec::{FullCodec, Encode, EncodeLike, Decode}; +use crate::{ + traits::Get, + storage::{generator, StorageDecodeLength, StorageValue, StorageMap, StorageDoubleMap}, +}; + +/// Marker trait for types `T` that can be stored in storage as `BoundedVec`. +pub trait BoundedVecValue: FullCodec + Clone + sp_std::fmt::Debug {} +impl BoundedVecValue for T {} + +/// A bounded vector. +/// +/// It has implementations for efficient append and length decoding, as with a normal `Vec<_>`, once +/// put into storage as a raw value, map or double-map. 
+/// +/// As the name suggests, the length of the queue is always bounded. All internal operations ensure +/// this bound is respected. +#[derive(Encode, Decode, crate::DefaultNoBound, crate::CloneNoBound, crate::DebugNoBound)] +pub struct BoundedVec>(Vec, PhantomData); + +// NOTE: we could also implement this as: +// impl, S2: Get> PartialEq> for BoundedVec +// to allow comparison of bounded vectors with different bounds. +impl> PartialEq for BoundedVec { + fn eq(&self, rhs: &Self) -> bool { + self.0 == rhs.0 + } +} +impl> Eq for BoundedVec {} + +impl> BoundedVec { + /// Get the bound of the type in `usize`. + pub fn bound() -> usize { + S::get() as usize + } + + /// Create `Self` from `t` without any checks. + /// + /// # WARNING + /// + /// Only use when you are sure you know what you are doing. + fn unchecked_from(t: Vec) -> Self { + Self(t, Default::default()) + } + + /// Create `Self` from `t` without any checks. Logs warnings if the bound is not being + /// respected. The additional scope can be used to indicate where a potential overflow is + /// happening. + /// + /// # WARNING + /// + /// Only use when you are sure you know what you are doing. + pub fn force_from(t: Vec, scope: Option<&'static str>) -> Self { + if t.len() > Self::bound() { + log::warn!( + target: crate::LOG_TARGET, + "length of a bounded vector in scope {} is not respected.", + scope.unwrap_or("UNKNOWN"), + ); + } + + Self::unchecked_from(t) + } + + /// Consume self, and return the inner `Vec`. Henceforth, the `Vec<_>` can be altered in an + /// arbitrary way. At some point, if the reverse conversion is required, `TryFrom>` can + /// be used. + /// + /// This is useful for cases if you need access to an internal API of the inner `Vec<_>` which + /// is not provided by the wrapper `BoundedVec`. + pub fn into_inner(self) -> Vec { + debug_assert!(self.0.len() <= Self::bound()); + self.0 + } + + /// Consumes self and mutates self via the given `mutate` function. 
+ /// + /// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is + /// returned. + /// + /// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` -> + /// [`Self::try_from`]. + pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut Vec)) -> Option { + mutate(&mut self.0); + (self.0.len() <= Self::bound()).then(move || self) + } + + /// Exactly the same semantics as [`Vec::insert`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if `index > len`. + pub fn try_insert(&mut self, index: usize, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.insert(index, element); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::push`], but returns an `Err` (and is a noop) if the + /// new length of the vector exceeds `S`. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds isize::MAX bytes. + pub fn try_push(&mut self, element: T) -> Result<(), ()> { + if self.len() < Self::bound() { + self.0.push(element); + Ok(()) + } else { + Err(()) + } + } + + /// Exactly the same semantics as [`Vec::remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn remove(&mut self, index: usize) { + self.0.remove(index); + } + + /// Exactly the same semantics as [`Vec::swap_remove`]. + /// + /// # Panics + /// + /// Panics if `index` is out of bounds. + pub fn swap_remove(&mut self, index: usize) { + self.0.swap_remove(index); + } + + /// Exactly the same semantics as [`Vec::retain`]. + pub fn retain bool>(&mut self, f: F) { + self.0.retain(f) + } +} + +impl> TryFrom> for BoundedVec { + type Error = (); + fn try_from(t: Vec) -> Result { + if t.len() <= Self::bound() { + Ok(Self::unchecked_from(t)) + } else { + Err(()) + } + } +} + +// It is okay to give a non-mutable reference of the inner vec to anyone. 
+impl> AsRef> for BoundedVec { + fn as_ref(&self) -> &Vec { + &self.0 + } +} + +// will allow for immutable all operations of `Vec` on `BoundedVec`. +impl> sp_std::ops::Deref for BoundedVec { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +// Allows for indexing similar to a normal `Vec`. Can panic if out of bound. +impl> sp_std::ops::Index for BoundedVec { + type Output = T; + fn index(&self, index: usize) -> &Self::Output { + self.get(index).expect("index out of bound") + } +} + +impl> sp_std::iter::IntoIterator for BoundedVec { + type Item = T; + type IntoIter = sp_std::vec::IntoIter; + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl> codec::DecodeLength for BoundedVec { + fn len(self_encoded: &[u8]) -> Result { + // `BoundedVec` stored just a `Vec`, thus the length is at the beginning in + // `Compact` form, and same implementation as `Vec` can be used. + as codec::DecodeLength>::len(self_encoded) + } +} + +impl> StorageDecodeLength for BoundedVec {} + +/// Storage value that is *maybe* capable of [`StorageAppend`](crate::storage::StorageAppend). +pub trait TryAppendValue> { + /// Try and append the `item` into the storage item. + /// + /// This might fail if bounds are not respected. + fn try_append>(item: LikeT) -> Result<(), ()>; +} + +/// Storage map that is *maybe* capable of [`StorageAppend`](crate::storage::StorageAppend). +pub trait TryAppendMap> { + /// Try and append the `item` into the storage map at the given `key`. + /// + /// This might fail if bounds are not respected. + fn try_append + Clone, LikeT: EncodeLike>( + key: LikeK, + item: LikeT, + ) -> Result<(), ()>; +} + +/// Storage double map that is *maybe* capable of [`StorageAppend`](crate::storage::StorageAppend). +pub trait TryAppendDoubleMap> { + /// Try and append the `item` into the storage double map at the given `key`. + /// + /// This might fail if bounds are not respected. 
+ fn try_append< + LikeK1: EncodeLike + Clone, + LikeK2: EncodeLike + Clone, + LikeT: EncodeLike, + >( + key1: LikeK1, + key2: LikeK2, + item: LikeT, + ) -> Result<(), ()>; +} + +impl, StorageValueT: generator::StorageValue>> + TryAppendValue for StorageValueT +{ + fn try_append>(item: LikeT) -> Result<(), ()> { + let bound = BoundedVec::::bound(); + let current = Self::decode_len().unwrap_or_default(); + if current < bound { + // NOTE: we cannot reuse the implementation for `Vec` here because we never want to + // mark `BoundedVec` as `StorageAppend`. + let key = Self::storage_value_final_key(); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + +impl< + K: FullCodec, + T: BoundedVecValue, + S: Get, + StorageMapT: generator::StorageMap>, + > TryAppendMap for StorageMapT +{ + fn try_append + Clone, LikeT: EncodeLike>( + key: LikeK, + item: LikeT, + ) -> Result<(), ()> { + let bound = BoundedVec::::bound(); + let current = Self::decode_len(key.clone()).unwrap_or_default(); + if current < bound { + let key = Self::storage_map_final_key(key); + sp_io::storage::append(&key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + +impl< + K1: FullCodec, + K2: FullCodec, + T: BoundedVecValue, + S: Get, + StorageDoubleMapT: generator::StorageDoubleMap>, + > TryAppendDoubleMap for StorageDoubleMapT +{ + fn try_append< + LikeK1: EncodeLike + Clone, + LikeK2: EncodeLike + Clone, + LikeT: EncodeLike, + >( + key1: LikeK1, + key2: LikeK2, + item: LikeT, + ) -> Result<(), ()> { + let bound = BoundedVec::::bound(); + let current = Self::decode_len(key1.clone(), key2.clone()).unwrap_or_default(); + if current < bound { + let double_map_key = Self::storage_double_map_final_key(key1, key2); + sp_io::storage::append(&double_map_key, item.encode()); + Ok(()) + } else { + Err(()) + } + } +} + +#[cfg(test)] +pub mod test { + use super::*; + use sp_io::TestExternalities; + use sp_std::convert::TryInto; + use crate::{assert_ok, Twox128}; + + 
crate::parameter_types! { + pub const Seven: u32 = 7; + pub const Four: u32 = 4; + } + + crate::generate_storage_alias! { Prefix, Foo => Value> } + crate::generate_storage_alias! { Prefix, FooMap => Map<(u32, Twox128), BoundedVec> } + crate::generate_storage_alias! { + Prefix, + FooDoubleMap => DoubleMap<(u32, Twox128), (u32, Twox128), BoundedVec> + } + + #[test] + fn decode_len_works() { + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + Foo::put(bounded); + assert_eq!(Foo::decode_len().unwrap(), 3); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooMap::insert(1, bounded); + assert_eq!(FooMap::decode_len(1).unwrap(), 3); + assert!(FooMap::decode_len(0).is_none()); + assert!(FooMap::decode_len(2).is_none()); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooDoubleMap::insert(1, 1, bounded); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 3); + assert!(FooDoubleMap::decode_len(2, 1).is_none()); + assert!(FooDoubleMap::decode_len(1, 2).is_none()); + assert!(FooDoubleMap::decode_len(2, 2).is_none()); + }); + } + + #[test] + fn try_append_works() { + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + Foo::put(bounded); + assert_ok!(Foo::try_append(4)); + assert_ok!(Foo::try_append(5)); + assert_ok!(Foo::try_append(6)); + assert_ok!(Foo::try_append(7)); + assert_eq!(Foo::decode_len().unwrap(), 7); + assert!(Foo::try_append(8).is_err()); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooMap::insert(1, bounded); + + assert_ok!(FooMap::try_append(1, 4)); + assert_ok!(FooMap::try_append(1, 5)); + assert_ok!(FooMap::try_append(1, 6)); + assert_ok!(FooMap::try_append(1, 7)); + assert_eq!(FooMap::decode_len(1).unwrap(), 7); + 
assert!(FooMap::try_append(1, 8).is_err()); + + // append to a non-existing + assert!(FooMap::get(2).is_none()); + assert_ok!(FooMap::try_append(2, 4)); + assert_eq!(FooMap::get(2).unwrap(), BoundedVec::::unchecked_from(vec![4])); + assert_ok!(FooMap::try_append(2, 5)); + assert_eq!( + FooMap::get(2).unwrap(), + BoundedVec::::unchecked_from(vec![4, 5]) + ); + }); + + TestExternalities::default().execute_with(|| { + let bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + FooDoubleMap::insert(1, 1, bounded); + + assert_ok!(FooDoubleMap::try_append(1, 1, 4)); + assert_ok!(FooDoubleMap::try_append(1, 1, 5)); + assert_ok!(FooDoubleMap::try_append(1, 1, 6)); + assert_ok!(FooDoubleMap::try_append(1, 1, 7)); + assert_eq!(FooDoubleMap::decode_len(1, 1).unwrap(), 7); + assert!(FooDoubleMap::try_append(1, 1, 8).is_err()); + + // append to a non-existing + assert!(FooDoubleMap::get(2, 1).is_none()); + assert_ok!(FooDoubleMap::try_append(2, 1, 4)); + assert_eq!( + FooDoubleMap::get(2, 1).unwrap(), + BoundedVec::::unchecked_from(vec![4]) + ); + assert_ok!(FooDoubleMap::try_append(2, 1, 5)); + assert_eq!( + FooDoubleMap::get(2, 1).unwrap(), + BoundedVec::::unchecked_from(vec![4, 5]) + ); + }); + } + + #[test] + fn try_insert_works() { + let mut bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(1, 0).unwrap(); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + + assert!(bounded.try_insert(0, 9).is_err()); + assert_eq!(*bounded, vec![1, 0, 2, 3]); + } + + #[test] + #[should_panic(expected = "insertion index (is 9) should be <= len (is 3)")] + fn try_inert_panics_if_oob() { + let mut bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_insert(9, 0).unwrap(); + } + + #[test] + fn try_push_works() { + let mut bounded: BoundedVec = vec![1, 2, 3].try_into().unwrap(); + bounded.try_push(0).unwrap(); + assert_eq!(*bounded, vec![1, 2, 3, 0]); + + assert!(bounded.try_push(9).is_err()); + } + + #[test] + fn deref_coercion_works() { + let bounded: 
BoundedVec = vec![1, 2, 3].try_into().unwrap(); + // these methods come from deref-ed vec. + assert_eq!(bounded.len(), 3); + assert!(bounded.iter().next().is_some()); + assert!(!bounded.is_empty()); + } + + #[test] + fn try_mutate_works() { + let bounded: BoundedVec = vec![1, 2, 3, 4, 5, 6].try_into().unwrap(); + let bounded = bounded.try_mutate(|v| v.push(7)).unwrap(); + assert_eq!(bounded.len(), 7); + assert!(bounded.try_mutate(|v| v.push(8)).is_none()); + } +} diff --git a/frame/support/src/storage/generator/value.rs b/frame/support/src/storage/generator/value.rs index 093dcb305e64a..e07c952320aa5 100644 --- a/frame/support/src/storage/generator/value.rs +++ b/frame/support/src/storage/generator/value.rs @@ -77,10 +77,8 @@ impl> storage::StorageValue for G { let key = Self::storage_value_final_key(); // attempt to get the length directly. - let maybe_old = match unhashed::get_raw(&key) { - Some(old_data) => Some(O::decode(&mut &old_data[..]).map_err(|_| ())?), - None => None, - }; + let maybe_old = unhashed::get_raw(&key) + .map(|old_data| O::decode(&mut &old_data[..]).map_err(|_| ())).transpose()?; let maybe_new = f(maybe_old); if let Some(new) = maybe_new.as_ref() { new.using_encoded(|d| unhashed::put_raw(&key, d)); diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index b29a0b83652d9..b4a1a9225dd1f 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -34,11 +34,14 @@ pub struct StorageIterator { impl StorageIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. + #[deprecated(note="Please use the storage_iter or storage_iter_with_suffix functions instead")] pub fn new(module: &[u8], item: &[u8]) -> Self { + #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. 
+ #[deprecated(note="Please use the storage_iter or storage_iter_with_suffix functions instead")] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); prefix.extend_from_slice(&Twox128::hash(module)); @@ -92,11 +95,14 @@ pub struct StorageKeyIterator { impl StorageKeyIterator { /// Construct iterator to iterate over map items in `module` for the map called `item`. + #[deprecated(note="Please use the storage_key_iter or storage_key_iter_with_suffix functions instead")] pub fn new(module: &[u8], item: &[u8]) -> Self { + #[allow(deprecated)] Self::with_suffix(module, item, &[][..]) } /// Construct iterator to iterate over map items in `module` for the map called `item`. + #[deprecated(note="Please use the storage_key_iter or storage_key_iter_with_suffix functions instead")] pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { let mut prefix = Vec::new(); prefix.extend_from_slice(&Twox128::hash(module)); @@ -148,6 +154,58 @@ impl Iterator } } +/// Construct iterator to iterate over map items in `module` for the map called `item`. +pub fn storage_iter(module: &[u8], item: &[u8]) -> PrefixIterator<(Vec, T)> { + storage_iter_with_suffix(module, item, &[][..]) +} + +/// Construct iterator to iterate over map items in `module` for the map called `item`. +pub fn storage_iter_with_suffix( + module: &[u8], + item: &[u8], + suffix: &[u8], +) -> PrefixIterator<(Vec, T)> { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&Twox128::hash(module)); + prefix.extend_from_slice(&Twox128::hash(item)); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { + let value = T::decode(&mut &raw_value[..])?; + Ok((raw_key_without_prefix.to_vec(), value)) + }; + + PrefixIterator { prefix, previous_key, drain: false, closure } +} + +/// Construct iterator to iterate over map items in `module` for the map called `item`. 
+pub fn storage_key_iter( + module: &[u8], + item: &[u8], +) -> PrefixIterator<(K, T)> { + storage_key_iter_with_suffix::(module, item, &[][..]) +} + +/// Construct iterator to iterate over map items in `module` for the map called `item`. +pub fn storage_key_iter_with_suffix( + module: &[u8], + item: &[u8], + suffix: &[u8], +) -> PrefixIterator<(K, T)> { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&Twox128::hash(module)); + prefix.extend_from_slice(&Twox128::hash(item)); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { + let mut key_material = H::reverse(raw_key_without_prefix); + let key = K::decode(&mut key_material)?; + let value = T::decode(&mut &raw_value[..])?; + Ok((key, value)) + }; + PrefixIterator { prefix, previous_key, drain: false, closure } +} + /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { get_storage_value::<()>(module, item, hash).is_some() @@ -294,7 +352,13 @@ mod tests { hash::StorageHasher, }; use sp_io::TestExternalities; - use super::{move_prefix, move_pallet, move_storage_from_pallet}; + use super::{ + move_prefix, + move_pallet, + move_storage_from_pallet, + storage_iter, + storage_key_iter, + }; struct OldPalletStorageValuePrefix; impl frame_support::traits::StorageInstance for OldPalletStorageValuePrefix { @@ -386,4 +450,31 @@ mod tests { assert_eq!(NewStorageMap::iter().collect::>(), vec![(1, 2), (3, 4)]); }) } + + #[test] + fn test_storage_iter() { + TestExternalities::new_empty().execute_with(|| { + OldStorageValue::put(3); + OldStorageMap::insert(1, 2); + OldStorageMap::insert(3, 4); + + assert_eq!( + storage_key_iter::(b"my_old_pallet", b"foo_map").collect::>(), + vec![(1, 2), (3, 4)], + ); + + assert_eq!( + storage_iter(b"my_old_pallet", b"foo_map").drain().map(|t| t.1).collect::>(), + vec![2, 4], + ); + 
assert_eq!(OldStorageMap::iter().collect::>(), vec![]); + + // Empty because storage iterator skips over the entry under the first key + assert_eq!( + storage_iter::(b"my_old_pallet", b"foo_value").drain().next(), + None + ); + assert_eq!(OldStorageValue::get(), Some(3)); + }); + } } diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index d9820475a7e88..adcf44a64620e 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -17,14 +17,19 @@ //! Stuff to do with the runtime's storage. +use sp_core::storage::ChildInfo; use sp_std::prelude::*; use codec::{FullCodec, FullEncode, Encode, EncodeLike, Decode}; -use crate::hash::{Twox128, StorageHasher}; +use crate::{ + hash::{Twox128, StorageHasher, ReversibleStorageHasher}, + traits::Get, +}; use sp_runtime::generic::{Digest, DigestItem}; pub use sp_runtime::TransactionOutcome; pub mod unhashed; pub mod hashed; +pub mod bounded_vec; pub mod child; #[doc(hidden)] pub mod generator; @@ -519,6 +524,14 @@ pub struct PrefixIterator { closure: fn(&[u8], &[u8]) -> Result, } +impl PrefixIterator { + /// Mutate this iterator into a draining iterator; items iterated are removed from storage. + pub fn drain(mut self) -> Self { + self.drain = true; + self + } +} + impl Iterator for PrefixIterator { type Item = T; @@ -563,6 +576,133 @@ impl Iterator for PrefixIterator { } } +/// Iterate over a prefix of a child trie and decode raw_key and raw_value into `T`. +/// +/// If any decoding fails it skips the key and continues to the next one. +pub struct ChildTriePrefixIterator { + /// The prefix iterated on + prefix: Vec, + /// child info for child trie + child_info: ChildInfo, + /// The last key iterated on + previous_key: Vec, + /// If true then values are removed while iterating + drain: bool, + /// Whether or not we should fetch the previous key + fetch_previous_key: bool, + /// Function that takes `(raw_key_without_prefix, raw_value)` and decode `T`. 
+ /// `raw_key_without_prefix` is the raw storage key without the prefix iterated on. + closure: fn(&[u8], &[u8]) -> Result, +} + +impl ChildTriePrefixIterator { + /// Mutate this iterator into a draining iterator; items iterated are removed from storage. + pub fn drain(mut self) -> Self { + self.drain = true; + self + } +} + +impl ChildTriePrefixIterator<(Vec, T)> { + /// Construct iterator to iterate over child trie items in `child_info` with the prefix `prefix`. + /// + /// NOTE: Iterator with [`Self::drain`] will remove any value who failed to decode + pub fn with_prefix(child_info: &ChildInfo, prefix: &[u8]) -> Self { + let prefix = prefix.to_vec(); + let previous_key = prefix.clone(); + let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { + let value = T::decode(&mut &raw_value[..])?; + Ok((raw_key_without_prefix.to_vec(), value)) + }; + + Self { + prefix, + child_info: child_info.clone(), + previous_key, + drain: false, + fetch_previous_key: true, + closure, + } + } +} + +impl ChildTriePrefixIterator<(K, T)> { + /// Construct iterator to iterate over child trie items in `child_info` with the prefix `prefix`. 
+ /// + /// NOTE: Iterator with [`Self::drain`] will remove any key or value who failed to decode + pub fn with_prefix_over_key(child_info: &ChildInfo, prefix: &[u8]) -> Self { + let prefix = prefix.to_vec(); + let previous_key = prefix.clone(); + let closure = |raw_key_without_prefix: &[u8], raw_value: &[u8]| { + let mut key_material = H::reverse(raw_key_without_prefix); + let key = K::decode(&mut key_material)?; + let value = T::decode(&mut &raw_value[..])?; + Ok((key, value)) + }; + + Self { + prefix, + child_info: child_info.clone(), + previous_key, + drain: false, + fetch_previous_key: true, + closure, + } + } +} + +impl Iterator for ChildTriePrefixIterator { + type Item = T; + + fn next(&mut self) -> Option { + loop { + let maybe_next = if self.fetch_previous_key { + self.fetch_previous_key = false; + Some(self.previous_key.clone()) + } else { + sp_io::default_child_storage::next_key( + &self.child_info.storage_key(), + &self.previous_key, + ) + .filter(|n| n.starts_with(&self.prefix)) + }; + break match maybe_next { + Some(next) => { + self.previous_key = next; + let raw_value = match child::get_raw(&self.child_info, &self.previous_key) { + Some(raw_value) => raw_value, + None => { + log::error!( + "next_key returned a key with no value at {:?}", + self.previous_key, + ); + continue + } + }; + if self.drain { + child::kill(&self.child_info, &self.previous_key) + } + let raw_key_without_prefix = &self.previous_key[self.prefix.len()..]; + let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { + Ok(item) => item, + Err(e) => { + log::error!( + "(key, value) failed to decode at {:?}: {:?}", + self.previous_key, + e, + ); + continue + } + }; + + Some(item) + } + None => None, + } + } + } +} + /// Trait for maps that store all its value after a unique prefix. /// /// By default the final prefix is: @@ -670,25 +810,28 @@ pub trait StorageDecodeLength: private::Sealed + codec::DecodeLength { /// outside of this crate. 
mod private { use super::*; + use bounded_vec::{BoundedVecValue, BoundedVec}; pub trait Sealed {} impl Sealed for Vec {} impl Sealed for Digest {} + impl> Sealed for BoundedVec {} } impl StorageAppend for Vec {} impl StorageDecodeLength for Vec {} -/// We abuse the fact that SCALE does not put any marker into the encoding, i.e. -/// we only encode the internal vec and we can append to this vec. We have a test that ensures -/// that if the `Digest` format ever changes, we need to remove this here. +/// We abuse the fact that SCALE does not put any marker into the encoding, i.e. we only encode the +/// internal vec and we can append to this vec. We have a test that ensures that if the `Digest` +/// format ever changes, we need to remove this here. impl StorageAppend> for Digest {} #[cfg(test)] mod test { use super::*; use sp_core::hashing::twox_128; + use crate::hash::Identity; use sp_io::TestExternalities; use generator::StorageValue as _; @@ -825,4 +968,78 @@ mod test { }); }); } + + #[test] + fn child_trie_prefixed_map_works() { + TestExternalities::default().execute_with(|| { + let child_info_a = child::ChildInfo::new_default(b"a"); + child::put(&child_info_a, &[1, 2, 3], &8u16); + child::put(&child_info_a, &[2], &8u16); + child::put(&child_info_a, &[2, 1, 3], &8u8); + child::put(&child_info_a, &[2, 2, 3], &8u16); + child::put(&child_info_a, &[3], &8u16); + + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[2]) + .collect::, u16)>>(), + vec![ + (vec![], 8), + (vec![2, 3], 8), + ], + ); + + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[2]) + .drain() + .collect::, u16)>>(), + vec![ + (vec![], 8), + (vec![2, 3], 8), + ], + ); + + // The only remaining is the ones outside prefix + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[]) + .collect::, u8)>>(), + vec![ + (vec![1, 2, 3], 8), + (vec![3], 8), + ], + ); + + child::put(&child_info_a, &[1, 2, 3], &8u16); + child::put(&child_info_a, &[2], &8u16); + 
child::put(&child_info_a, &[2, 1, 3], &8u8); + child::put(&child_info_a, &[2, 2, 3], &8u16); + child::put(&child_info_a, &[3], &8u16); + + assert_eq!( + ChildTriePrefixIterator::with_prefix_over_key::(&child_info_a, &[2]) + .collect::>(), + vec![ + (u16::decode(&mut &[2, 3][..]).unwrap(), 8), + ], + ); + + assert_eq!( + ChildTriePrefixIterator::with_prefix_over_key::(&child_info_a, &[2]) + .drain() + .collect::>(), + vec![ + (u16::decode(&mut &[2, 3][..]).unwrap(), 8), + ], + ); + + // The only remaining is the ones outside prefix + assert_eq!( + ChildTriePrefixIterator::with_prefix(&child_info_a, &[]) + .collect::, u8)>>(), + vec![ + (vec![1, 2, 3], 8), + (vec![3], 8), + ], + ); + }); + } } diff --git a/frame/support/src/storage/types/double_map.rs b/frame/support/src/storage/types/double_map.rs index f0b5f66eff058..184d96b3a54f9 100644 --- a/frame/support/src/storage/types/double_map.rs +++ b/frame/support/src/storage/types/double_map.rs @@ -22,9 +22,10 @@ use codec::{FullCodec, Decode, EncodeLike, Encode}; use crate::{ storage::{ StorageAppend, StorageDecodeLength, + bounded_vec::{BoundedVec, BoundedVecValue}, types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, }, - traits::{GetDefault, StorageInstance}, + traits::{GetDefault, StorageInstance, Get}, }; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; use sp_std::vec::Vec; @@ -102,6 +103,50 @@ where } } +impl + StorageDoubleMap< + Prefix, + Hasher1, + Key1, + Hasher2, + Key2, + BoundedVec, + QueryKind, + OnEmpty, + > where + Prefix: StorageInstance, + Hasher1: crate::hash::StorageHasher, + Hasher2: crate::hash::StorageHasher, + Key1: FullCodec, + Key2: FullCodec, + QueryKind: QueryKindTrait, OnEmpty>, + OnEmpty: crate::traits::Get + 'static, + VecValue: BoundedVecValue, + VecBound: Get, +{ + /// Try and append the given item to the double map in the storage. + /// + /// Is only available if `Value` of the map is [`BoundedVec`]. 
+ pub fn try_append( + key1: EncodeLikeKey1, + key2: EncodeLikeKey2, + item: EncodeLikeItem, + ) -> Result<(), ()> + where + EncodeLikeKey1: EncodeLike + Clone, + EncodeLikeKey2: EncodeLike + Clone, + EncodeLikeItem: EncodeLike, + { + < + Self + as + crate::storage::bounded_vec::TryAppendDoubleMap + >::try_append( + key1, key2, item, + ) + } +} + impl StorageDoubleMap where diff --git a/frame/support/src/storage/types/map.rs b/frame/support/src/storage/types/map.rs index 4af28a77cf2b6..187323b4ad1ee 100644 --- a/frame/support/src/storage/types/map.rs +++ b/frame/support/src/storage/types/map.rs @@ -22,9 +22,10 @@ use codec::{FullCodec, Decode, EncodeLike, Encode}; use crate::{ storage::{ StorageAppend, StorageDecodeLength, + bounded_vec::{BoundedVec, BoundedVecValue}, types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, }, - traits::{GetDefault, StorageInstance}, + traits::{GetDefault, StorageInstance, Get}, }; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; use sp_std::prelude::*; @@ -91,6 +92,34 @@ where } } +impl + StorageMap, QueryKind, OnEmpty> +where + Prefix: StorageInstance, + Hasher: crate::hash::StorageHasher, + Key: FullCodec, + QueryKind: QueryKindTrait, OnEmpty>, + OnEmpty: crate::traits::Get + 'static, + VecValue: BoundedVecValue, + VecBound: Get, +{ + /// Try and append the given item to the map in the storage. + /// + /// Is only available if `Value` of the map is [`BoundedVec`]. 
+ pub fn try_append( + key: EncodeLikeKey, + item: EncodeLikeItem, + ) -> Result<(), ()> + where + EncodeLikeKey: EncodeLike + Clone, + EncodeLikeItem: EncodeLike, + { + >::try_append( + key, item, + ) + } +} + impl StorageMap where diff --git a/frame/support/src/storage/types/value.rs b/frame/support/src/storage/types/value.rs index 39f718956eb64..d536d76d76b8e 100644 --- a/frame/support/src/storage/types/value.rs +++ b/frame/support/src/storage/types/value.rs @@ -21,9 +21,10 @@ use codec::{FullCodec, Decode, EncodeLike, Encode}; use crate::{ storage::{ StorageAppend, StorageDecodeLength, + bounded_vec::{BoundedVec, BoundedVecValue}, types::{OptionQuery, QueryKindTrait, OnEmptyGetter}, }, - traits::{GetDefault, StorageInstance}, + traits::{GetDefault, StorageInstance, Get}, }; use frame_metadata::{DefaultByteGetter, StorageEntryModifier}; @@ -60,6 +61,26 @@ where } } +impl + StorageValue, QueryKind, OnEmpty> +where + Prefix: StorageInstance, + QueryKind: QueryKindTrait, OnEmpty>, + OnEmpty: crate::traits::Get + 'static, + VecValue: BoundedVecValue, + VecBound: Get, +{ + /// Try and append the given item to the value in the storage. + /// + /// Is only available if `Value` of the storage is [`BoundedVec`]. + pub fn try_append(item: EncodeLikeItem) -> Result<(), ()> + where + EncodeLikeItem: EncodeLike, + { + >::try_append(item) + } +} + impl StorageValue where Prefix: StorageInstance, diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 9f8afdf7c7546..7ee2b0a56094b 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -15,2347 +15,68 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Traits for FRAME. +//! Traits and associated utilities for use in the FRAME environment. //! //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. 
-use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; -use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; -use sp_core::u32_trait::Value as U32; -use sp_runtime::{ - traits::{ - AtLeast32Bit, AtLeast32BitUnsigned, Block as BlockT, BadOrigin, Convert, - MaybeSerializeDeserialize, SaturatedConversion, Saturating, StoredMapError, - UniqueSaturatedFrom, UniqueSaturatedInto, Zero, - }, - BoundToRuntimeAppPublic, ConsensusEngineId, DispatchError, DispatchResult, Percent, - RuntimeAppPublic, RuntimeDebug, +pub mod tokens; +pub use tokens::fungible; +pub use tokens::fungibles; +pub use tokens::currency::{ + Currency, LockIdentifier, LockableCurrency, ReservableCurrency, VestingSchedule, }; -use sp_staking::SessionIndex; -use crate::dispatch::Parameter; -use crate::storage::StorageMap; -use crate::weights::Weight; -use bitflags::bitflags; -use impl_trait_for_tuples::impl_for_tuples; +pub use tokens::imbalance::{Imbalance, OnUnbalanced, SignedImbalance}; +pub use tokens::{ExistenceRequirement, WithdrawReasons, BalanceStatus}; -/// Re-expected for the macro. -#[doc(hidden)] -pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; - -/// A trait for online node inspection in a session. -/// -/// Something that can give information about the current validator set. -pub trait ValidatorSet { - /// Type for representing validator id in a session. - type ValidatorId: Parameter; - /// A type for converting `AccountId` to `ValidatorId`. - type ValidatorIdOf: Convert>; - - /// Returns current session index. - fn session_index() -> SessionIndex; - - /// Returns the active set of validators. - fn validators() -> Vec; -} - -/// [`ValidatorSet`] combined with an identification. -pub trait ValidatorSetWithIdentification: ValidatorSet { - /// Full identification of `ValidatorId`. - type Identification: Parameter; - /// A type for converting `ValidatorId` to `Identification`. 
- type IdentificationOf: Convert>; -} - -/// A session handler for specific key type. -pub trait OneSessionHandler: BoundToRuntimeAppPublic { - /// The key type expected. - type Key: Decode + Default + RuntimeAppPublic; - - /// The given validator set will be used for the genesis session. - /// It is guaranteed that the given validator set will also be used - /// for the second session, therefore the first call to `on_new_session` - /// should provide the same validator set. - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator, ValidatorId: 'a; - - /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. - /// - /// `changed` is true when at least one of the session keys - /// or the underlying economic identities/distribution behind one the - /// session keys has changed, false otherwise. - /// - /// The `validators` are the validators of the incoming session, and `queued_validators` - /// will follow. - fn on_new_session<'a, I: 'a>( - changed: bool, - validators: I, - queued_validators: I, - ) where I: Iterator, ValidatorId: 'a; - - /// A notification for end of the session. - /// - /// Note it is triggered before any `SessionManager::end_session` handlers, - /// so we can still affect the validator set. - fn on_before_session_ending() {} - - /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(_validator_index: usize); -} - -/// Simple trait for providing a filter over a reference to some type. -pub trait Filter { - /// Determine if a given value should be allowed through the filter (returns `true`) or not. - fn filter(_: &T) -> bool; -} - -impl Filter for () { - fn filter(_: &T) -> bool { true } -} - -/// Trait to add a constraint onto the filter. -pub trait FilterStack: Filter { - /// The type used to archive the stack. - type Stack; - - /// Add a new `constraint` onto the filter. 
- fn push(constraint: impl Fn(&T) -> bool + 'static); - - /// Removes the most recently pushed, and not-yet-popped, constraint from the filter. - fn pop(); - - /// Clear the filter, returning a value that may be used later to `restore` it. - fn take() -> Self::Stack; - - /// Restore the filter from a previous `take` operation. - fn restore(taken: Self::Stack); -} - -/// Guard type for pushing a constraint to a `FilterStack` and popping when dropped. -pub struct FilterStackGuard, T>(PhantomData<(F, T)>); - -/// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when -/// dropped. -pub struct ClearFilterGuard, T>(Option, PhantomData); - -impl, T> FilterStackGuard { - /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when - /// this instance is dropped. - pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self { - F::push(constraint); - Self(PhantomData) - } -} - -impl, T> Drop for FilterStackGuard { - fn drop(&mut self) { - F::pop(); - } -} - -impl, T> ClearFilterGuard { - /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when - /// this instance is dropped. - pub fn new() -> Self { - Self(Some(F::take()), PhantomData) - } -} - -impl, T> Drop for ClearFilterGuard { - fn drop(&mut self) { - if let Some(taken) = self.0.take() { - F::restore(taken); - } - } -} - -/// Simple trait for providing a filter over a reference to some type, given an instance of itself. -pub trait InstanceFilter: Sized + Send + Sync { - /// Determine if a given value should be allowed through the filter (returns `true`) or not. - fn filter(&self, _: &T) -> bool; - - /// Determines whether `self` matches at least everything that `_o` does. - fn is_superset(&self, _o: &Self) -> bool { false } -} - -impl InstanceFilter for () { - fn filter(&self, _: &T) -> bool { true } - fn is_superset(&self, _o: &Self) -> bool { true } -} - -#[macro_export] -macro_rules! 
impl_filter_stack { - ($target:ty, $base:ty, $call:ty, $module:ident) => { - #[cfg(feature = "std")] - mod $module { - #[allow(unused_imports)] - use super::*; - use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; - - thread_local! { - static FILTER: RefCell bool + 'static>>> = RefCell::new(Vec::new()); - } - - impl Filter<$call> for $target { - fn filter(call: &$call) -> bool { - <$base>::filter(call) && - FILTER.with(|filter| filter.borrow().iter().all(|f| f(call))) - } - } - - impl FilterStack<$call> for $target { - type Stack = Vec bool + 'static>>; - fn push(f: impl Fn(&$call) -> bool + 'static) { - FILTER.with(|filter| filter.borrow_mut().push(Box::new(f))); - } - fn pop() { - FILTER.with(|filter| filter.borrow_mut().pop()); - } - fn take() -> Self::Stack { - FILTER.with(|filter| take(filter.borrow_mut().as_mut())) - } - fn restore(mut s: Self::Stack) { - FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s)); - } - } - } - - #[cfg(not(feature = "std"))] - mod $module { - #[allow(unused_imports)] - use super::*; - use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; - - struct ThisFilter(RefCell bool + 'static>>>); - // NOTE: Safe only in wasm (guarded above) because there's only one thread. 
- unsafe impl Send for ThisFilter {} - unsafe impl Sync for ThisFilter {} - - static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new())); - - impl Filter<$call> for $target { - fn filter(call: &$call) -> bool { - <$base>::filter(call) && FILTER.0.borrow().iter().all(|f| f(call)) - } - } - - impl FilterStack<$call> for $target { - type Stack = Vec bool + 'static>>; - fn push(f: impl Fn(&$call) -> bool + 'static) { - FILTER.0.borrow_mut().push(Box::new(f)); - } - fn pop() { - FILTER.0.borrow_mut().pop(); - } - fn take() -> Self::Stack { - take(FILTER.0.borrow_mut().as_mut()) - } - fn restore(mut s: Self::Stack) { - swap(FILTER.0.borrow_mut().as_mut(), &mut s); - } - } - } - } -} - -/// Type that provide some integrity tests. -/// -/// This implemented for modules by `decl_module`. -#[impl_for_tuples(30)] -pub trait IntegrityTest { - /// Run integrity test. - /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - -#[cfg(test)] -mod test_impl_filter_stack { - use super::*; - - pub struct IsCallable; - pub struct BaseFilter; - impl Filter for BaseFilter { - fn filter(x: &u32) -> bool { x % 2 == 0 } - } - impl_filter_stack!( - crate::traits::test_impl_filter_stack::IsCallable, - crate::traits::test_impl_filter_stack::BaseFilter, - u32, - is_callable - ); - - #[test] - fn impl_filter_stack_should_work() { - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - - IsCallable::push(|x| *x < 42); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - IsCallable::push(|x| *x % 3 == 0); - assert!(IsCallable::filter(&36)); - assert!(!IsCallable::filter(&40)); - - IsCallable::pop(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - let saved = IsCallable::take(); - assert!(IsCallable::filter(&36)); - 
assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - - IsCallable::restore(saved); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - - IsCallable::pop(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } - - #[test] - fn guards_should_work() { - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - { - let _guard_1 = FilterStackGuard::::new(|x| *x < 42); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - { - let _guard_2 = FilterStackGuard::::new(|x| *x % 3 == 0); - assert!(IsCallable::filter(&36)); - assert!(!IsCallable::filter(&40)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - { - let _guard_2 = ClearFilterGuard::::new(); - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(!IsCallable::filter(&42)); - } - assert!(IsCallable::filter(&36)); - assert!(IsCallable::filter(&40)); - assert!(IsCallable::filter(&42)); - assert!(!IsCallable::filter(&43)); - } -} - -/// An abstraction of a value stored within storage, but possibly as part of a larger composite -/// item. -pub trait StoredMap { - /// Get the item, or its default if it doesn't yet exist; we make no distinction between the - /// two. - fn get(k: &K) -> T; - - /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is - /// returned. 
It is removed or reset to default value if it has been mutated to `None` - fn try_mutate_exists>( - k: &K, - f: impl FnOnce(&mut Option) -> Result, - ) -> Result; - - // Everything past here has a default implementation. - - /// Mutate the item. - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { - Self::mutate_exists(k, |maybe_account| match maybe_account { - Some(ref mut account) => f(account), - x @ None => { - let mut account = Default::default(); - let r = f(&mut account); - *x = Some(account); - r - } - }) - } - - /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. - /// - /// This is infallible as long as the value does not get destroyed. - fn mutate_exists( - k: &K, - f: impl FnOnce(&mut Option) -> R, - ) -> Result { - Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) - } - - /// Set the item to something new. - fn insert(k: &K, t: T) -> Result<(), StoredMapError> { Self::mutate(k, |i| *i = t) } - - /// Remove the item or otherwise replace it with its default value; we don't care which. - fn remove(k: &K) -> Result<(), StoredMapError> { Self::mutate_exists(k, |x| *x = None) } -} - -/// A simple, generic one-parameter event notifier/handler. -pub trait HandleLifetime { - /// An account was created. - fn created(_t: &T) -> Result<(), StoredMapError> { Ok(()) } - - /// An account was killed. - fn killed(_t: &T) -> Result<(), StoredMapError> { Ok(()) } -} - -impl HandleLifetime for () {} - -/// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this -/// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this -/// would break the ability to have custom impls of `StoredValue`. The other workaround is to -/// implement it directly in the macro. 
-/// -/// This form has the advantage that two additional types are provides, `Created` and `Removed`, -/// which are both generic events that can be tied to handlers to do something in the case of being -/// about to create an account where one didn't previously exist (at all; not just where it used to -/// be the default value), or where the account is being removed or reset back to the default value -/// where previously it did exist (though may have been in a default state). This works well with -/// system module's `CallOnCreatedAccount` and `CallKillAccount`. -pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); -impl< - S: StorageMap, - L: HandleLifetime, - K: FullCodec, - T: FullCodec + Default, -> StoredMap for StorageMapShim { - fn get(k: &K) -> T { S::get(k) } - fn insert(k: &K, t: T) -> Result<(), StoredMapError> { - if !S::contains_key(&k) { - L::created(k)?; - } - S::insert(k, t); - Ok(()) - } - fn remove(k: &K) -> Result<(), StoredMapError> { - if S::contains_key(&k) { - L::killed(&k)?; - S::remove(k); - } - Ok(()) - } - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { - if !S::contains_key(&k) { - L::created(k)?; - } - Ok(S::mutate(k, f)) - } - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { - S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let r = f(maybe_value); - let exists = maybe_value.is_some(); - - if !existed && exists { - L::created(k)?; - } else if existed && !exists { - L::killed(k)?; - } - Ok(r) - }) - } - fn try_mutate_exists>( - k: &K, - f: impl FnOnce(&mut Option) -> Result, - ) -> Result { - S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let r = f(maybe_value)?; - let exists = maybe_value.is_some(); - - if !existed && exists { - L::created(k).map_err(E::from)?; - } else if existed && !exists { - L::killed(k).map_err(E::from)?; - } - Ok(r) - }) - } -} - -/// Something that can estimate at which block the next session 
rotation will happen (i.e. a new -/// session starts). -/// -/// The accuracy of the estimates is dependent on the specific implementation, but in order to get -/// the best estimate possible these methods should be called throughout the duration of the session -/// (rather than calling once and storing the result). -/// -/// This should be the same logical unit that dictates `ShouldEndSession` to the session module. No -/// assumptions are made about the scheduling of the sessions. -pub trait EstimateNextSessionRotation { - /// Return the average length of a session. - /// - /// This may or may not be accurate. - fn average_session_length() -> BlockNumber; - - /// Return an estimate of the current session progress. - /// - /// None should be returned if the estimation fails to come to an answer. - fn estimate_current_session_progress(now: BlockNumber) -> (Option, Weight); - - /// Return the block number at which the next session rotation is estimated to happen. - /// - /// None should be returned if the estimation fails to come to an answer. - fn estimate_next_session_rotation(now: BlockNumber) -> (Option, Weight); -} - -impl EstimateNextSessionRotation for () { - fn average_session_length() -> BlockNumber { - Zero::zero() - } - - fn estimate_current_session_progress(_: BlockNumber) -> (Option, Weight) { - (None, Zero::zero()) - } - - fn estimate_next_session_rotation(_: BlockNumber) -> (Option, Weight) { - (None, Zero::zero()) - } -} - -/// Something that can estimate at which block scheduling of the next session will happen (i.e when -/// we will try to fetch new validators). -/// -/// This only refers to the point when we fetch the next session details and not when we enact them -/// (for enactment there's `EstimateNextSessionRotation`). With `pallet-session` this should be -/// triggered whenever `SessionManager::new_session` is called. 
-/// -/// For example, if we are using a staking module this would be the block when the session module -/// would ask staking what the next validator set will be, as such this must always be implemented -/// by the session module. -pub trait EstimateNextNewSession { - /// Return the average length of a session. - /// - /// This may or may not be accurate. - fn average_session_length() -> BlockNumber; - - /// Return the block number at which the next new session is estimated to happen. - /// - /// None should be returned if the estimation fails to come to an answer. - fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight); -} - -impl EstimateNextNewSession for () { - fn average_session_length() -> BlockNumber { - Zero::zero() - } - - fn estimate_next_new_session(_: BlockNumber) -> (Option, Weight) { - (None, Zero::zero()) - } -} - -/// Anything that can have a `::len()` method. -pub trait Len { - /// Return the length of data type. - fn len(&self) -> usize; -} - -impl Len for T where ::IntoIter: ExactSizeIterator { - fn len(&self) -> usize { - self.clone().into_iter().len() - } -} - -/// A trait for querying a single value from a type. -/// -/// It is not required that the value is constant. -pub trait Get { - /// Return the current value. - fn get() -> T; -} - -impl Get for () { - fn get() -> T { T::default() } -} - -/// A trait for querying whether a type can be said to "contain" a value. -pub trait Contains { - /// Return `true` if this "contains" the given value `t`. - fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } - - /// Get a vector of all members in the set, ordered. - fn sorted_members() -> Vec; - - /// Get the number of items in the set. - fn count() -> usize { Self::sorted_members().len() } - - /// Add an item that would satisfy `contains`. It does not make sure any other - /// state is correctly maintained or generated. 
- /// - /// **Should be used for benchmarking only!!!** - #[cfg(feature = "runtime-benchmarks")] - fn add(_t: &T) { unimplemented!() } -} - -/// A trait for querying bound for the length of an implementation of `Contains` -pub trait ContainsLengthBound { - /// Minimum number of elements contained - fn min_len() -> usize; - /// Maximum number of elements contained - fn max_len() -> usize; -} - -/// Handler for when a new account has been created. -#[impl_for_tuples(30)] -pub trait OnNewAccount { - /// A new account `who` has been registered. - fn on_new_account(who: &AccountId); -} - -/// The account with the given id was reaped. -#[impl_for_tuples(30)] -pub trait OnKilledAccount { - /// The account with the given id was reaped. - fn on_killed_account(who: &AccountId); -} - -/// A trait for finding the author of a block header based on the `PreRuntime` digests contained -/// within it. -pub trait FindAuthor { - /// Find the author of a block based on the pre-runtime digests. - fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator; -} - -impl FindAuthor for () { - fn find_author<'a, I>(_: I) -> Option - where I: 'a + IntoIterator - { - None - } -} - -/// A trait for verifying the seal of a header and returning the author. -pub trait VerifySeal { - /// Verify a header and return the author, if any. - fn verify_seal(header: &Header) -> Result, &'static str>; -} - -/// Something which can compute and check proofs of -/// a historical key owner and return full identification data of that -/// key owner. -pub trait KeyOwnerProofSystem { - /// The proof of membership itself. - type Proof: Codec; - /// The full identification of a key owner and the stash account. - type IdentificationTuple: Codec; - - /// Prove membership of a key owner in the current block-state. - /// - /// This should typically only be called off-chain, since it may be - /// computationally heavy. 
- /// - /// Returns `Some` iff the key owner referred to by the given `key` is a - /// member of the current set. - fn prove(key: Key) -> Option; - - /// Check a proof of membership on-chain. Return `Some` iff the proof is - /// valid and recent enough to check. - fn check_proof(key: Key, proof: Self::Proof) -> Option; -} - -impl KeyOwnerProofSystem for () { - // The proof and identification tuples is any bottom type to guarantee that the methods of this - // implementation can never be called or return anything other than `None`. - type Proof = crate::Void; - type IdentificationTuple = crate::Void; - - fn prove(_key: Key) -> Option { - None - } - - fn check_proof(_key: Key, _proof: Self::Proof) -> Option { - None - } -} - -/// Handler for when some currency "account" decreased in balance for -/// some reason. -/// -/// The only reason at present for an increase would be for validator rewards, but -/// there may be other reasons in the future or for other chains. -/// -/// Reasons for decreases include: -/// -/// - Someone got slashed. -/// - Someone paid for a transaction to be included. -pub trait OnUnbalanced { - /// Handler for some imbalances. The different imbalances might have different origins or - /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all - /// of them. Infallible. - fn on_unbalanceds(amounts: impl Iterator) where Imbalance: crate::traits::Imbalance { - Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) - } - - /// Handler for some imbalance. Infallible. - fn on_unbalanced(amount: Imbalance) { - amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) - } - - /// Actually handle a non-zero imbalance. You probably want to implement this rather than - /// `on_unbalanced`. - fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } -} - -impl OnUnbalanced for () {} - -/// Simple boolean for whether an account needs to be kept in existence. 
-#[derive(Copy, Clone, Eq, PartialEq)] -pub enum ExistenceRequirement { - /// Operation must not result in the account going out of existence. - /// - /// Note this implies that if the account never existed in the first place, then the operation - /// may legitimately leave the account unchanged and still non-existent. - KeepAlive, - /// Operation may result in account going out of existence. - AllowDeath, -} - -/// A type for which some values make sense to be able to drop without further consideration. -pub trait TryDrop: Sized { - /// Drop an instance cleanly. Only works if its value represents "no-operation". - fn try_drop(self) -> Result<(), Self>; -} - -/// A trait for a not-quite Linear Type that tracks an imbalance. -/// -/// Functions that alter account balances return an object of this trait to -/// express how much account balances have been altered in aggregate. If -/// dropped, the currency system will take some default steps to deal with -/// the imbalance (`balances` module simply reduces or increases its -/// total issuance). Your module should generally handle it in some way, -/// good practice is to do so in a configurable manner using an -/// `OnUnbalanced` type for each situation in which your module needs to -/// handle an imbalance. -/// -/// Imbalances can either be Positive (funds were added somewhere without -/// being subtracted elsewhere - e.g. a reward) or Negative (funds deducted -/// somewhere without an equal and opposite addition - e.g. a slash or -/// system fee payment). -/// -/// Since they are unsigned, the actual type is always Positive or Negative. -/// The trait makes no distinction except to define the `Opposite` type. -/// -/// New instances of zero value can be created (`zero`) and destroyed -/// (`drop_zero`). -/// -/// Existing instances can be `split` and merged either consuming `self` with -/// `merge` or mutating `self` with `subsume`. 
If the target is an `Option`, -/// then `maybe_merge` and `maybe_subsume` might work better. Instances can -/// also be `offset` with an `Opposite` that is less than or equal to in value. -/// -/// You can always retrieve the raw balance value using `peek`. -#[must_use] -pub trait Imbalance: Sized + TryDrop { - /// The oppositely imbalanced type. They come in pairs. - type Opposite: Imbalance; - - /// The zero imbalance. Can be destroyed with `drop_zero`. - fn zero() -> Self; - - /// Drop an instance cleanly. Only works if its `self.value()` is zero. - fn drop_zero(self) -> Result<(), Self>; - - /// Consume `self` and return two independent instances; the first - /// is guaranteed to be at most `amount` and the second will be the remainder. - fn split(self, amount: Balance) -> (Self, Self); - - /// Consume `self` and return two independent instances; the amounts returned will be in - /// approximately the same ratio as `first`:`second`. - /// - /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should - /// fit into a `u32`. Overflow will safely saturate in both cases. - fn ration(self, first: u32, second: u32) -> (Self, Self) - where Balance: From + Saturating + Div - { - let total: u32 = first.saturating_add(second); - let amount1 = self.peek().saturating_mul(first.into()) / total.into(); - self.split(amount1) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { - let (a, b) = self.split(amount); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. 
- fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise into two pre-existing Imbalance refs. - /// - /// A convenient replacement for `split` and `subsume`. - fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { - let (a, b) = self.split(amount); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - fn merge(self, other: Self) -> Self; - - /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with - /// reversed arguments. - fn merge_into(self, other: &mut Self) { - other.subsume(self) - } - - /// Consume `self` and maybe an `other` to return a new instance that combines - /// both. - fn maybe_merge(self, other: Option) -> Self { - if let Some(o) = other { - self.merge(o) - } else { - self - } - } - - /// Consume an `other` to mutate `self` into a new instance that combines - /// both. - fn subsume(&mut self, other: Self); - - /// Maybe consume an `other` to mutate `self` into a new instance that combines - /// both. - fn maybe_subsume(&mut self, other: Option) { - if let Some(o) = other { - self.subsume(o) - } - } - - /// Consume self and along with an opposite counterpart to return - /// a combined result. 
- /// - /// Returns `Ok` along with a new instance of `Self` if this instance has a - /// greater value than the `other`. Otherwise returns `Err` with an instance of - /// the `Opposite`. In both cases the value represents the combination of `self` - /// and `other`. - fn offset(self, other: Self::Opposite) -> Result; - - /// The raw value of self. - fn peek(&self) -> Balance; -} - -/// Either a positive or a negative imbalance. -pub enum SignedImbalance>{ - /// A positive imbalance (funds have been created but none destroyed). - Positive(P), - /// A negative imbalance (funds have been destroyed but none created). - Negative(P::Opposite), -} - -impl< - P: Imbalance, - N: Imbalance, - B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, -> SignedImbalance { - pub fn zero() -> Self { - SignedImbalance::Positive(P::zero()) - } - - pub fn drop_zero(self) -> Result<(), Self> { - match self { - SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), - SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), - } - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - pub fn merge(self, other: Self) -> Self { - match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => - SignedImbalance::Positive(one.merge(other)), - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => - SignedImbalance::Negative(one.merge(other)), - (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => - if one.peek() > other.peek() { - SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) - } else { - SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) - }, - (one, other) => other.merge(one), - } - } -} - -/// Split an unbalanced amount two ways between a common divisor. 
-pub struct SplitTwoWays< - Balance, - Imbalance, - Part1, - Target1, - Part2, - Target2, ->(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>); - -impl< - Balance: From + Saturating + Div, - I: Imbalance, - Part1: U32, - Target1: OnUnbalanced, - Part2: U32, - Target2: OnUnbalanced, -> OnUnbalanced for SplitTwoWays -{ - fn on_nonzero_unbalanced(amount: I) { - let total: u32 = Part1::VALUE + Part2::VALUE; - let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into(); - let (imb1, imb2) = amount.split(amount1); - Target1::on_unbalanced(imb1); - Target2::on_unbalanced(imb2); - } -} - -/// Abstraction over a fungible assets system. -pub trait Currency { - /// The balance of an account. - type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + - Default; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type PositiveImbalance: Imbalance; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type NegativeImbalance: Imbalance; - - // PUBLIC IMMUTABLES - - /// The combined balance of `who`. - fn total_balance(who: &AccountId) -> Self::Balance; - - /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no - /// balance changes in the meantime and only the reserved balance is not taken into account. - fn can_slash(who: &AccountId, value: Self::Balance) -> bool; - - /// The total amount of issuance in the system. - fn total_issuance() -> Self::Balance; - - /// The minimum balance any single account may have. This is equivalent to the `Balances` module's - /// `ExistentialDeposit`. - fn minimum_balance() -> Self::Balance; - - /// Reduce the total issuance by `amount` and return the according imbalance. 
The imbalance will - /// typically be used to reduce an account by the same amount with e.g. `settle`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example - /// in the case of underflow. - fn burn(amount: Self::Balance) -> Self::PositiveImbalance; - - /// Increase the total issuance by `amount` and return the according imbalance. The imbalance - /// will typically be used to increase an account by the same amount with e.g. - /// `resolve_into_existing` or `resolve_creating`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example - /// in the case of overflow. - fn issue(amount: Self::Balance) -> Self::NegativeImbalance; - - /// Produce a pair of imbalances that cancel each other out exactly. - /// - /// This is just the same as burning and issuing the same amount and has no effect on the - /// total issuance. - fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) { - (Self::burn(amount.clone()), Self::issue(amount)) - } - - /// The 'free' balance of a given account. - /// - /// This is the only balance that matters in terms of most operations on tokens. It alone - /// is used to determine the balance when in the contract execution environment. When this - /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is - /// deleted: specifically `FreeBalance`. - /// - /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn free_balance(who: &AccountId) -> Self::Balance; - - /// Returns `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. Basically, it's just a dry-run of `withdraw`. - /// - /// `Err(...)` with the reason why not otherwise. 
- fn ensure_can_withdraw( - who: &AccountId, - _amount: Self::Balance, - reasons: WithdrawReasons, - new_balance: Self::Balance, - ) -> DispatchResult; - - // PUBLIC MUTABLES (DANGEROUS) - - /// Transfer some liquid free balance to another staker. - /// - /// This is a very high-level function. It will ensure all appropriate fees are paid - /// and no imbalance in the system remains. - fn transfer( - source: &AccountId, - dest: &AccountId, - value: Self::Balance, - existence_requirement: ExistenceRequirement, - ) -> DispatchResult; - - /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// Mints `value` to the free balance of `who`. - /// - /// If `who` doesn't exist, nothing is done and an Err returned. - fn deposit_into_existing( - who: &AccountId, - value: Self::Balance - ) -> result::Result; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. - fn resolve_into_existing( - who: &AccountId, - value: Self::NegativeImbalance, - ) -> result::Result<(), Self::NegativeImbalance> { - let v = value.peek(); - match Self::deposit_into_existing(who, v) { - Ok(opposite) => Ok(drop(value.offset(opposite))), - _ => Err(value), - } - } - - /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. - /// - /// Infallible. - fn deposit_creating( - who: &AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. 
- fn resolve_creating( - who: &AccountId, - value: Self::NegativeImbalance, - ) { - let v = value.peek(); - drop(value.offset(Self::deposit_creating(who, v))); - } - - /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is - /// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining. - /// - /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, - /// then it returns `Err`. - /// - /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value - /// is `value`. - fn withdraw( - who: &AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result; - - /// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success. - fn settle( - who: &AccountId, - value: Self::PositiveImbalance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result<(), Self::PositiveImbalance> { - let v = value.peek(); - match Self::withdraw(who, v, reasons, liveness) { - Ok(opposite) => Ok(drop(value.offset(opposite))), - _ => Err(value), - } - } - - /// Ensure an account's free balance equals some value; this will create the account - /// if needed. - /// - /// Returns a signed imbalance and status to indicate if the account was successfully updated or update - /// has led to killing of the account. - fn make_free_balance_be( - who: &AccountId, - balance: Self::Balance, - ) -> SignedImbalance; -} - -/// Trait for providing an ERC-20 style set of named fungible assets. -pub trait Fungibles { - /// Means of identifying one asset class from another. - type AssetId: FullCodec + Copy + Default; - /// Scalar type for storing balance of an account. - type Balance: AtLeast32BitUnsigned + FullCodec + Copy + Default; - /// Get the `asset` balance of `who`. 
- fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; - /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. - fn can_deposit(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> bool; - /// Increase the `asset` balance of `who` by `amount`. - fn deposit(asset: Self::AssetId, who: AccountId, amount: Self::Balance) -> DispatchResult; - /// Attempt to reduce the `asset` balance of `who` by `amount`. - fn withdraw(asset: Self::AssetId, who: AccountId, amount: Self::Balance) -> DispatchResult; -} - -/// Status of funds. -#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] -pub enum BalanceStatus { - /// Funds are free, as corresponding to `free` item in Balances. - Free, - /// Funds are reserved, as corresponding to `reserved` item in Balances. - Reserved, -} - -/// A currency where funds can be reserved from the user. -pub trait ReservableCurrency: Currency { - /// Same result as `reserve(who, value)` (but without the side-effects) assuming there - /// are no balance changes in the meantime. - fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; - - /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. - /// - /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` - /// is less than `value`, then a non-zero second item will be returned. - fn slash_reserved( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// The amount of the balance of a given account that is externally reserved; this can still get - /// slashed, but gets slashed last of all. - /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. 
- /// - /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' - /// is deleted: specifically, `ReservedBalance`. - /// - /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn reserved_balance(who: &AccountId) -> Self::Balance; - - /// Moves `value` from balance to reserved balance. - /// - /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will - /// be returned to notify of this. This is different behavior than `unreserve`. - fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; - - /// Moves up to `value` from reserved balance to free balance. This function cannot fail. - /// - /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` - /// is less than `value`, then the remaining amount will be returned. - /// - /// # NOTES - /// - /// - This is different from `reserve`. - /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will - /// invoke `on_reserved_too_low` and could reap the account. - fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; - - /// Moves up to `value` from reserved balance of account `slashed` to balance of account - /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be - /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, - /// depending on the `status`. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then `Ok(non_zero)` will be returned. - fn repatriate_reserved( - slashed: &AccountId, - beneficiary: &AccountId, - value: Self::Balance, - status: BalanceStatus, - ) -> result::Result; -} - -/// An identifier for a lock. Used for disambiguating different locks so that -/// they can be individually replaced or removed. 
-pub type LockIdentifier = [u8; 8]; - -/// A currency whose accounts can have liquidity restrictions. -pub trait LockableCurrency: Currency { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// The maximum number of locks a user should have on their account. - type MaxLocks: Get; - - /// Create a new balance lock on account `who`. - /// - /// If the new lock is valid (i.e. not already expired), it will push the struct to - /// the `Locks` vec in storage. Note that you can lock more funds than a user has. - /// - /// If the lock `id` already exists, this will update it. - fn set_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all - /// parameters or creates a new one if it does not exist. - /// - /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it - /// applies the most severe constraints of the two, while `set_lock` replaces the lock - /// with the new parameters. As in, `extend_lock` will set: - /// - maximum `amount` - /// - bitwise mask of all `reasons` - fn extend_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Remove an existing lock. - fn remove_lock( - id: LockIdentifier, - who: &AccountId, - ); -} - -/// A vesting schedule over a currency. This allows a particular currency to have vesting limits -/// applied to it. -pub trait VestingSchedule { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// The currency that this schedule applies to. - type Currency: Currency; - - /// Get the amount that is currently being vested and cannot be transferred out of this account. - /// Returns `None` if the account has no vesting schedule. - fn vesting_balance(who: &AccountId) -> Option<>::Balance>; - - /// Adds a vesting schedule to a given account. 
- /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. - /// - /// Is a no-op if the amount to be vested is zero. - /// - /// NOTE: This doesn't alter the free balance of the account. - fn add_vesting_schedule( - who: &AccountId, - locked: >::Balance, - per_block: >::Balance, - starting_block: Self::Moment, - ) -> DispatchResult; - - /// Remove a vesting schedule for a given account. - /// - /// NOTE: This doesn't alter the free balance of the account. - fn remove_vesting_schedule(who: &AccountId); -} - -bitflags! { - /// Reasons for moving funds out of an account. - #[derive(Encode, Decode)] - pub struct WithdrawReasons: i8 { - /// In order to pay for (system) transaction costs. - const TRANSACTION_PAYMENT = 0b00000001; - /// In order to transfer ownership. - const TRANSFER = 0b00000010; - /// In order to reserve some funds for a later return or repatriation. - const RESERVE = 0b00000100; - /// In order to pay some other (higher-level) fees. - const FEE = 0b00001000; - /// In order to tip a validator for transaction inclusion. - const TIP = 0b00010000; - } -} - -impl WithdrawReasons { - /// Choose all variants except for `one`. - /// - /// ```rust - /// # use frame_support::traits::WithdrawReasons; - /// # fn main() { - /// assert_eq!( - /// WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP, - /// WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT), - /// ); - /// # } - /// ``` - pub fn except(one: WithdrawReasons) -> WithdrawReasons { - let mut flags = Self::all(); - flags.toggle(one); - flags - } -} - -pub trait Time { - type Moment: AtLeast32Bit + Parameter + Default + Copy; - - fn now() -> Self::Moment; -} - -/// Trait to deal with unix time. -pub trait UnixTime { - /// Return duration since `SystemTime::UNIX_EPOCH`. 
- fn now() -> core::time::Duration; -} - -/// Trait for type that can handle incremental changes to a set of account IDs. -pub trait ChangeMembers { - /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The - /// new set is given by `new`, and need not be sorted. - /// - /// This resets any previous value of prime. - fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { - new.sort(); - Self::change_members_sorted(incoming, outgoing, &new[..]); - } - - /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The - /// new set is thus given by `sorted_new` and **must be sorted**. - /// - /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. - /// - /// This resets any previous value of prime. - fn change_members_sorted( - incoming: &[AccountId], - outgoing: &[AccountId], - sorted_new: &[AccountId], - ); - - /// Set the new members; they **must already be sorted**. This will compute the diff and use it to - /// call `change_members_sorted`. - /// - /// This resets any previous value of prime. - fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { - let (incoming, outgoing) = Self::compute_members_diff_sorted(new_members, old_members); - Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); - } - - /// Compute diff between new and old members; they **must already be sorted**. - /// - /// Returns incoming and outgoing members. 
- fn compute_members_diff_sorted( - new_members: &[AccountId], - old_members: &[AccountId], - ) -> (Vec, Vec) { - let mut old_iter = old_members.iter(); - let mut new_iter = new_members.iter(); - let mut incoming = Vec::new(); - let mut outgoing = Vec::new(); - let mut old_i = old_iter.next(); - let mut new_i = new_iter.next(); - loop { - match (old_i, new_i) { - (None, None) => break, - (Some(old), Some(new)) if old == new => { - old_i = old_iter.next(); - new_i = new_iter.next(); - } - (Some(old), Some(new)) if old < new => { - outgoing.push(old.clone()); - old_i = old_iter.next(); - } - (Some(old), None) => { - outgoing.push(old.clone()); - old_i = old_iter.next(); - } - (_, Some(new)) => { - incoming.push(new.clone()); - new_i = new_iter.next(); - } - } - } - (incoming, outgoing) - } - - /// Set the prime member. - fn set_prime(_prime: Option) {} - - /// Get the current prime. - fn get_prime() -> Option { - None - } -} - -impl ChangeMembers for () { - fn change_members(_: &[T], _: &[T], _: Vec) {} - fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} - fn set_members_sorted(_: &[T], _: &[T]) {} - fn set_prime(_: Option) {} -} - -/// Trait for type that can handle the initialization of account IDs at genesis. -pub trait InitializeMembers { - /// Initialize the members to the given `members`. - fn initialize_members(members: &[AccountId]); -} - -impl InitializeMembers for () { - fn initialize_members(_: &[T]) {} -} - -/// A trait that is able to provide randomness. -/// -/// Being a deterministic blockchain, real randomness is difficult to come by, different -/// implementations of this trait will provide different security guarantees. At best, -/// this will be randomness which was hard to predict a long time ago, but that has become -/// easy to predict recently. -pub trait Randomness { - /// Get the most recently determined random seed, along with the time in the past - /// since when it was determinable by chain observers. 
- /// - /// `subject` is a context identifier and allows you to get a different result to - /// other callers of this function; use it like `random(&b"my context"[..])`. - /// - /// NOTE: The returned seed should only be used to distinguish commitments made before - /// the returned block number. If the block number is too early (i.e. commitments were - /// made afterwards), then ensure no further commitments may be made and repeatedly - /// call this on later blocks until the block number returned is later than the latest - /// commitment. - fn random(subject: &[u8]) -> (Output, BlockNumber); - - /// Get the basic random seed. - /// - /// In general you won't want to use this, but rather `Self::random` which allows - /// you to give a subject for the random result and whose value will be - /// independently low-influence random from any other such seeds. - /// - /// NOTE: The returned seed should only be used to distinguish commitments made before - /// the returned block number. If the block number is too early (i.e. commitments were - /// made afterwards), then ensure no further commitments may be made and repeatedly - /// call this on later blocks until the block number returned is later than the latest - /// commitment. - fn random_seed() -> (Output, BlockNumber) { - Self::random(&[][..]) - } -} - -/// Trait to be used by block producing consensus engine modules to determine -/// how late the current block is (e.g. in a slot-based proposal mechanism how -/// many slots were skipped since the previous block). -pub trait Lateness { - /// Returns a generic measure of how late the current block is compared to - /// its parent. - fn lateness(&self) -> N; -} - -impl Lateness for () { - fn lateness(&self) -> N { - Zero::zero() - } -} - -/// Implementors of this trait provide information about whether or not some validator has -/// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. 
-pub trait ValidatorRegistration { - /// Returns true if the provided validator ID has been registered with the implementing runtime - /// module - fn is_registered(id: &ValidatorId) -> bool; -} - -/// Provides information about the pallet setup in the runtime. -/// -/// An implementor should be able to provide information about each pallet that -/// is configured in `construct_runtime!`. -pub trait PalletInfo { - /// Convert the given pallet `P` into its index as configured in the runtime. - fn index() -> Option; - /// Convert the given pallet `P` into its name as configured in the runtime. - fn name() -> Option<&'static str>; -} - -/// The function and pallet name of the Call. -#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] -pub struct CallMetadata { - /// Name of the function. - pub function_name: &'static str, - /// Name of the pallet to which the function belongs. - pub pallet_name: &'static str, -} - -/// Gets the function name of the Call. -pub trait GetCallName { - /// Return all function names. - fn get_call_names() -> &'static [&'static str]; - /// Return the function name of the Call. - fn get_call_name(&self) -> &'static str; -} - -/// Gets the metadata for the Call - function name and pallet name. -pub trait GetCallMetadata { - /// Return all module names. - fn get_module_names() -> &'static [&'static str]; - /// Return all function names for the given `module`. - fn get_call_names(module: &str) -> &'static [&'static str]; - /// Return a [`CallMetadata`], containing function and pallet name of the Call. - fn get_call_metadata(&self) -> CallMetadata; -} - -/// The block finalization trait. -/// -/// Implementing this lets you express what should happen for your pallet when the block is ending. -#[impl_for_tuples(30)] -pub trait OnFinalize { - /// The block is being finalized. Implement to have something happen. - /// - /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, - /// including inherent extrinsics. 
- fn on_finalize(_n: BlockNumber) {} -} - -/// The block's on idle trait. -/// -/// Implementing this lets you express what should happen for your pallet before -/// block finalization (see `on_finalize` hook) in case any remaining weight is left. -pub trait OnIdle { - /// The block is being finalized. - /// Implement to have something happen in case there is leftover weight. - /// Check the passed `remaining_weight` to make sure it is high enough to allow for - /// your pallet's extra computation. - /// - /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - - /// in a block are applied but before `on_finalize` is executed. - fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight - ) -> crate::weights::Weight { - 0 - } -} - -#[impl_for_tuples(30)] -impl OnIdle for Tuple { - fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( - let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); - weight = weight.saturating_add(Tuple::on_idle(n.clone(), adjusted_remaining_weight)); - )* ); - weight - } -} - -/// The block initialization trait. -/// -/// Implementing this lets you express what should happen for your pallet when the block is -/// beginning (right before the first extrinsic is executed). -pub trait OnInitialize { - /// The block is being initialized. Implement to have something happen. - /// - /// Return the non-negotiable weight consumed in the block. - /// - /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, - /// including inherent extrinsics. Hence for instance, if you runtime includes - /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. 
- fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } -} - -#[impl_for_tuples(30)] -impl OnInitialize for Tuple { - fn on_initialize(n: BlockNumber) -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); - weight - } -} - -/// A trait that will be called at genesis. -/// -/// Implementing this trait for a pallet let's you express operations that should -/// happen at genesis. It will be called in an externalities provided environment and -/// will see the genesis state after all pallets have written their genesis state. -#[impl_for_tuples(30)] -pub trait OnGenesis { - /// Something that should happen at genesis. - fn on_genesis() {} -} - -/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. -#[cfg(feature = "try-runtime")] -pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; +mod members; +pub use members::{ + Contains, ContainsLengthBound, SortedMembers, InitializeMembers, ChangeMembers, All, IsInVec, + AsContains, +}; -/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. -#[cfg(feature = "try-runtime")] -pub trait OnRuntimeUpgradeHelpersExt { - /// Generate a storage key unique to this runtime upgrade. - /// - /// This can be used to communicate data from pre-upgrade to post-upgrade state and check - /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. 
- #[cfg(feature = "try-runtime")] - fn storage_key(ident: &str) -> [u8; 32] { - let prefix = sp_io::hashing::twox_128(ON_RUNTIME_UPGRADE_PREFIX); - let ident = sp_io::hashing::twox_128(ident.as_bytes()); +mod validation; +pub use validation::{ + ValidatorSet, ValidatorSetWithIdentification, OneSessionHandler, FindAuthor, VerifySeal, + EstimateNextNewSession, EstimateNextSessionRotation, KeyOwnerProofSystem, ValidatorRegistration, + Lateness, +}; - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&prefix); - final_key[16..].copy_from_slice(&ident); +mod filter; +pub use filter::{ + Filter, FilterStack, FilterStackGuard, ClearFilterGuard, InstanceFilter, IntegrityTest, +}; - final_key - } +mod misc; +pub use misc::{ + Len, Get, GetDefault, HandleLifetime, TryDrop, Time, UnixTime, IsType, IsSubType, ExecuteBlock, + SameOrOther, OnNewAccount, OnKilledAccount, OffchainWorker, GetBacking, Backing, ExtrinsicCall, + EnsureInherentsAreFirst, +}; - /// Get temporary storage data written by [`Self::set_temp_storage`]. - /// - /// Returns `None` if either the data is unavailable or un-decodable. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being read from. - #[cfg(feature = "try-runtime")] - fn get_temp_storage(at: &str) -> Option { - sp_io::storage::get(&Self::storage_key(at)) - .and_then(|bytes| Decode::decode(&mut &*bytes).ok()) - } +mod stored_map; +pub use stored_map::{StoredMap, StorageMapShim}; +mod randomness; +pub use randomness::Randomness; - /// Write some temporary data to a specific storage that can be read (potentially in - /// post-upgrade hook) via [`Self::get_temp_storage`]. - /// - /// A `at` storage identifier must be provided to indicate where the storage is being written - /// to. 
- #[cfg(feature = "try-runtime")] - fn set_temp_storage(data: T, at: &str) { - sp_io::storage::set(&Self::storage_key(at), &data.encode()); - } -} +mod metadata; +pub use metadata::{ + CallMetadata, GetCallMetadata, GetCallName, PalletInfo, PalletVersion, GetPalletVersion, + PALLET_VERSION_STORAGE_KEY_POSTFIX, PalletInfoAccess, +}; +mod hooks; +pub use hooks::{Hooks, OnGenesis, OnInitialize, OnFinalize, OnIdle, OnRuntimeUpgrade, OnTimestampSet}; #[cfg(feature = "try-runtime")] -impl OnRuntimeUpgradeHelpersExt for U {} - -/// The runtime upgrade trait. -/// -/// Implementing this lets you express what should happen when the runtime upgrades, -/// and changes may need to occur to your module. -pub trait OnRuntimeUpgrade { - /// Perform a module upgrade. - /// - /// # Warning - /// - /// This function will be called before we initialized any runtime state, aka `on_initialize` - /// wasn't called yet. So, information like the block number and any other - /// block local data are not accessible. - /// - /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { - 0 - } - - /// Execute some pre-checks prior to a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { Ok(()) } - - /// Execute some post-checks after a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
- #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { Ok(()) } -} - -#[impl_for_tuples(30)] -impl OnRuntimeUpgrade for Tuple { - fn on_runtime_upgrade() -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); - weight - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); - result - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - let mut result = Ok(()); - for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); - result - } -} - -/// Off-chain computation trait. -/// -/// Implementing this trait on a module allows you to perform long-running tasks -/// that make (by default) validators generate transactions that feed results -/// of those long-running computations back on chain. -/// -/// NOTE: This function runs off-chain, so it can access the block state, -/// but cannot preform any alterations. More specifically alterations are -/// not forbidden, but they are not persisted in any way after the worker -/// has finished. -#[impl_for_tuples(30)] -pub trait OffchainWorker { - /// This function is being called after every block import (when fully synced). - /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. - fn offchain_worker(_n: BlockNumber) {} -} - -pub mod schedule { - use super::*; - - /// Information relating to the period of a scheduled task. First item is the length of the - /// period and the second is the number of times it should be executed in total before the task - /// is considered finished and removed. 
- pub type Period = (BlockNumber, u32); - - /// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning - /// higher priority. - pub type Priority = u8; - - /// The dispatch time of a scheduled task. - #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] - pub enum DispatchTime { - /// At specified block. - At(BlockNumber), - /// After specified number of blocks. - After(BlockNumber), - } - - /// The highest priority. We invert the value so that normal sorting will place the highest - /// priority at the beginning of the list. - pub const HIGHEST_PRIORITY: Priority = 0; - /// Anything of this value or lower will definitely be scheduled on the block that they ask for, even - /// if it breaches the `MaximumWeight` limitation. - pub const HARD_DEADLINE: Priority = 63; - /// The lowest priority. Most stuff should be around here. - pub const LOWEST_PRIORITY: Priority = 255; - - /// A type that can be used as a scheduler. - pub trait Anon { - /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + Debug; - - /// Schedule a dispatch to happen at the beginning of some block in the future. - /// - /// This is not named. - fn schedule( - when: DispatchTime, - maybe_periodic: Option>, - priority: Priority, - origin: Origin, - call: Call - ) -> Result; - - /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, - /// also. - /// - /// Will return an error if the `address` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - /// - /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For - /// that, you must name the task explicitly using the `Named` trait. - fn cancel(address: Self::Address) -> Result<(), ()>; - - /// Reschedule a task. 
For one-off tasks, this dispatch is guaranteed to succeed - /// only if it is executed *before* the currently scheduled block. For periodic tasks, - /// this dispatch is guaranteed to succeed only before the *initial* execution; for - /// others, use `reschedule_named`. - /// - /// Will return an error if the `address` is invalid. - fn reschedule( - address: Self::Address, - when: DispatchTime, - ) -> Result; - - /// Return the next dispatch time for a given task. - /// - /// Will return an error if the `address` is invalid. - fn next_dispatch_time(address: Self::Address) -> Result; - } - - /// A type that can be used as a scheduler. - pub trait Named { - /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; - - /// Schedule a dispatch to happen at the beginning of some block in the future. - /// - /// - `id`: The identity of the task. This must be unique and will return an error if not. - fn schedule_named( - id: Vec, - when: DispatchTime, - maybe_periodic: Option>, - priority: Priority, - origin: Origin, - call: Call - ) -> Result; - - /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances - /// of that, also. - /// - /// Will return an error if the `id` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - fn cancel_named(id: Vec) -> Result<(), ()>; - - /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed - /// only if it is executed *before* the currently scheduled block. - fn reschedule_named( - id: Vec, - when: DispatchTime, - ) -> Result; - - /// Return the next dispatch time for a given task. - /// - /// Will return an error if the `id` is invalid. - fn next_dispatch_time(id: Vec) -> Result; - } -} - -/// Some sort of check on the origin is performed by this object. 
-pub trait EnsureOrigin { - /// A return type. - type Success; - /// Perform the origin check. - fn ensure_origin(o: OuterOrigin) -> result::Result { - Self::try_origin(o).map_err(|_| BadOrigin) - } - /// Perform the origin check. - fn try_origin(o: OuterOrigin) -> result::Result; - - /// Returns an outer origin capable of passing `try_origin` check. - /// - /// ** Should be used for benchmarking only!!! ** - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> OuterOrigin; -} - -/// Type that can be dispatched with an origin but without checking the origin filter. -/// -/// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by -/// `construct_runtime` and `impl_outer_dispatch`. -pub trait UnfilteredDispatchable { - /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). - type Origin; - - /// Dispatch this call but do not check the filter in origin. - fn dispatch_bypass_filter(self, origin: Self::Origin) -> crate::dispatch::DispatchResultWithPostInfo; -} - -/// Methods available on `frame_system::Config::Origin`. -pub trait OriginTrait: Sized { - /// Runtime call type, as in `frame_system::Config::Call` - type Call; - - /// The caller origin, overarching type of all pallets origins. - type PalletsOrigin; - - /// The AccountId used across the system. - type AccountId; - - /// Add a filter to the origin. - fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); - - /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. - fn reset_filter(&mut self); - - /// Replace the caller with caller from the other origin - fn set_caller_from(&mut self, other: impl Into); - - /// Filter the call, if false then call is filtered out. - fn filter_call(&self, call: &Self::Call) -> bool; - - /// Get the caller. - fn caller(&self) -> &Self::PalletsOrigin; - - /// Create with system none origin and `frame-system::Config::BaseCallFilter`. 
- fn none() -> Self; - - /// Create with system root origin and no filter. - fn root() -> Self; - - /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. - fn signed(by: Self::AccountId) -> Self; -} - -/// Trait to be used when types are exactly same. -/// -/// This allow to convert back and forth from type, a reference and a mutable reference. -pub trait IsType: Into + From { - /// Cast reference. - fn from_ref(t: &T) -> &Self; - - /// Cast reference. - fn into_ref(&self) -> &T; - - /// Cast mutable reference. - fn from_mut(t: &mut T) -> &mut Self; - - /// Cast mutable reference. - fn into_mut(&mut self) -> &mut T; -} - -impl IsType for T { - fn from_ref(t: &T) -> &Self { t } - fn into_ref(&self) -> &T { self } - fn from_mut(t: &mut T) -> &mut Self { t } - fn into_mut(&mut self) -> &mut T { self } -} - -/// An instance of a pallet in the storage. -/// -/// It is required that these instances are unique, to support multiple instances per pallet in the same runtime! -/// -/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances -/// "InstanceNMyModule". -pub trait Instance: 'static { - /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" - const PREFIX: &'static str; -} - -/// An instance of a storage in a pallet. -/// -/// Define an instance for an individual storage inside a pallet. -/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is -/// used to isolate storages inside a pallet. -/// -/// NOTE: These information can be used to define storages in pallet such as a `StorageMap` which -/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` -pub trait StorageInstance { - /// Prefix of a pallet to isolate it from other pallets. - fn pallet_prefix() -> &'static str; - - /// Prefix given to a storage to isolate from other storages in the pallet. 
- const STORAGE_PREFIX: &'static str; -} - -/// Implement Get by returning Default for any type that implements Default. -pub struct GetDefault; -impl crate::traits::Get for GetDefault { - fn get() -> T { - T::default() - } -} - -/// A trait similar to `Convert` to convert values from `B` an abstract balance type -/// into u64 and back from u128. (This conversion is used in election and other places where complex -/// calculation over balance type is needed) -/// -/// Total issuance of the currency is passed in, but an implementation of this trait may or may not -/// use it. -/// -/// # WARNING -/// -/// the total issuance being passed in implies that the implementation must be aware of the fact -/// that its values can affect the outcome. This implies that if the vote value is dependent on the -/// total issuance, it should never ber written to storage for later re-use. -pub trait CurrencyToVote { - /// Convert balance to u64. - fn to_vote(value: B, issuance: B) -> u64; - - /// Convert u128 to balance. - fn to_currency(value: u128, issuance: B) -> B; -} - -/// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. -/// -/// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the -/// important cases: -/// -/// If the chain's total issuance is less than u64::max(), this will always be 1, which means that -/// the factor will not have any effect. In this case, any account's balance is also less. Thus, -/// both of the conversions are basically an `as`; Any balance can fit in u64. -/// -/// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and -/// divided upon conversion. 
-pub struct U128CurrencyToVote; - -impl U128CurrencyToVote { - fn factor(issuance: u128) -> u128 { - (issuance / u64::max_value() as u128).max(1) - } -} - -impl CurrencyToVote for U128CurrencyToVote { - fn to_vote(value: u128, issuance: u128) -> u64 { - (value / Self::factor(issuance)).saturated_into() - } - - fn to_currency(value: u128, issuance: u128) -> u128 { - value.saturating_mul(Self::factor(issuance)) - } -} - - -/// A naive implementation of `CurrencyConvert` that simply saturates all conversions. -/// -/// # Warning -/// -/// This is designed to be used mostly for testing. Use with care, and think about the consequences. -pub struct SaturatingCurrencyToVote; - -impl + UniqueSaturatedFrom> CurrencyToVote for SaturatingCurrencyToVote { - fn to_vote(value: B, _: B) -> u64 { - value.unique_saturated_into() - } - - fn to_currency(value: u128, _: B) -> B { - B::unique_saturated_from(value) - } -} - -/// Something that can be checked to be a of sub type `T`. -/// -/// This is useful for enums where each variant encapsulates a different sub type, and -/// you need access to these sub types. -/// -/// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this -/// to check if a certain call is an instance of the local pallet's `Call` enum. 
-/// -/// # Example -/// -/// ``` -/// # use frame_support::traits::IsSubType; -/// -/// enum Test { -/// String(String), -/// U32(u32), -/// } -/// -/// impl IsSubType for Test { -/// fn is_sub_type(&self) -> Option<&String> { -/// match self { -/// Self::String(ref r) => Some(r), -/// _ => None, -/// } -/// } -/// } -/// -/// impl IsSubType for Test { -/// fn is_sub_type(&self) -> Option<&u32> { -/// match self { -/// Self::U32(ref r) => Some(r), -/// _ => None, -/// } -/// } -/// } -/// -/// fn main() { -/// let data = Test::String("test".into()); -/// -/// assert_eq!("test", IsSubType::::is_sub_type(&data).unwrap().as_str()); -/// } -/// ``` -pub trait IsSubType { - /// Returns `Some(_)` if `self` is an instance of sub type `T`. - fn is_sub_type(&self) -> Option<&T>; -} - -/// The pallet hooks trait. Implementing this lets you express some logic to execute. -pub trait Hooks { - /// The block is being finalized. Implement to have something happen. - fn on_finalize(_n: BlockNumber) {} - - /// This will be run when the block is being finalized (before `on_finalize`). - /// Implement to have something happen using the remaining weight. - /// Will not fire if the remaining weight is 0. - /// Return the weight used, the hook will subtract it from current weight used - /// and pass the result to the next `on_idle` hook if it exists. - fn on_idle( - _n: BlockNumber, - _remaining_weight: crate::weights::Weight - ) -> crate::weights::Weight { - 0 - } - - /// The block is being initialized. Implement to have something happen. - /// - /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } - - /// Perform a module upgrade. - /// - /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it - /// doesn't include the write of the pallet version in storage. 
The final complete logic - /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by - /// `Pallet`. - /// - /// # Warning - /// - /// This function will be called before we initialized any runtime state, aka `on_initialize` - /// wasn't called yet. So, information like the block number and any other - /// block local data are not accessible. - /// - /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { 0 } - - /// Execute some pre-checks prior to a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result<(), &'static str> { - Ok(()) - } - - /// Execute some post-checks after a runtime upgrade. - /// - /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. - #[cfg(feature = "try-runtime")] - fn post_upgrade() -> Result<(), &'static str> { - Ok(()) - } - - /// Implementing this function on a module allows you to perform long-running tasks - /// that make (by default) validators generate transactions that feed results - /// of those long-running computations back on chain. - /// - /// NOTE: This function runs off-chain, so it can access the block state, - /// but cannot preform any alterations. More specifically alterations are - /// not forbidden, but they are not persisted in any way after the worker - /// has finished. - /// - /// This function is being called after every block import (when fully synced). - /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. - fn offchain_worker(_n: BlockNumber) {} - - /// Run integrity test. 
- /// - /// The test is not executed in a externalities provided environment. - fn integrity_test() {} -} - -/// A trait to define the build function of a genesis config, T and I are placeholder for pallet -/// trait and pallet instance. +pub use hooks::{OnRuntimeUpgradeHelpersExt, ON_RUNTIME_UPGRADE_PREFIX}; #[cfg(feature = "std")] -pub trait GenesisBuild: Default + MaybeSerializeDeserialize { - /// The build function is called within an externalities allowing storage APIs. - /// Thus one can write to storage using regular pallet storages. - fn build(&self); - - /// Build the storage using `build` inside default storage. - fn build_storage(&self) -> Result { - let mut storage = Default::default(); - self.assimilate_storage(&mut storage)?; - Ok(storage) - } - - /// Assimilate the storage for this module into pre-existing overlays. - fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { - sp_state_machine::BasicExternalities::execute_with_storage(storage, || { - self.build(); - Ok(()) - }) - } -} - -/// The storage key postfix that is used to store the [`PalletVersion`] per pallet. -/// -/// The full storage key is built by using: -/// Twox128([`PalletInfo::name`]) ++ Twox128([`PALLET_VERSION_STORAGE_KEY_POSTFIX`]) -pub const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; - -/// The version of a pallet. -/// -/// Each pallet version is stored in the state under a fixed key. See -/// [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built. -#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy)] -pub struct PalletVersion { - /// The major version of the pallet. - pub major: u16, - /// The minor version of the pallet. - pub minor: u8, - /// The patch version of the pallet. - pub patch: u8, -} - -impl PalletVersion { - /// Creates a new instance of `Self`. 
- pub fn new(major: u16, minor: u8, patch: u8) -> Self { - Self { - major, - minor, - patch, - } - } - - /// Returns the storage key for a pallet version. - /// - /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. - /// - /// Returns `None` if the given `PI` returned a `None` as name for the given - /// `Pallet`. - pub fn storage_key() -> Option<[u8; 32]> { - let pallet_name = PI::name::()?; - - let pallet_name = sp_io::hashing::twox_128(pallet_name.as_bytes()); - let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); - - let mut final_key = [0u8; 32]; - final_key[..16].copy_from_slice(&pallet_name); - final_key[16..].copy_from_slice(&postfix); - - Some(final_key) - } - - /// Put this pallet version into the storage. - /// - /// It will use the storage key that is associated with the given `Pallet`. - /// - /// # Panics - /// - /// This function will panic iff `Pallet` can not be found by `PalletInfo`. - /// In a runtime that is put together using - /// [`construct_runtime!`](crate::construct_runtime) this should never happen. - /// - /// It will also panic if this function isn't executed in an externalities - /// provided environment. - pub fn put_into_storage(&self) { - let key = Self::storage_key::() - .expect("Every active pallet has a name in the runtime; qed"); - - crate::storage::unhashed::put(&key, self); - } -} - -impl sp_std::cmp::PartialOrd for PalletVersion { - fn partial_cmp(&self, other: &Self) -> Option { - let res = self.major - .cmp(&other.major) - .then_with(|| - self.minor - .cmp(&other.minor) - .then_with(|| self.patch.cmp(&other.patch) - )); - - Some(res) - } -} - -/// Provides version information about a pallet. -/// -/// This trait provides two functions for returning the version of a -/// pallet. There is a state where both functions can return distinct versions. -/// See [`GetPalletVersion::storage_version`] for more information about this. 
-pub trait GetPalletVersion { - /// Returns the current version of the pallet. - fn current_version() -> PalletVersion; - - /// Returns the version of the pallet that is stored in storage. - /// - /// Most of the time this will return the exact same version as - /// [`GetPalletVersion::current_version`]. Only when being in - /// a state after a runtime upgrade happened and the pallet did - /// not yet updated its version in storage, this will return a - /// different(the previous, seen from the time of calling) version. - /// - /// See [`PalletVersion`] for more information. - /// - /// # Note - /// - /// If there was no previous version of the pallet stored in the state, - /// this function returns `None`. - fn storage_version() -> Option; -} - -/// Something that can execute a given block. -/// -/// Executing a block means that all extrinsics in a given block will be executed and the resulting -/// header will be checked against the header of the given block. -pub trait ExecuteBlock { - /// Execute the given `block`. - /// - /// This will execute all extrinsics in the block and check that the resulting header is correct. - /// - /// # Panic - /// - /// Panics when an extrinsics panics or the resulting header doesn't match the expected header. - fn execute_block(block: Block); -} - -/// A trait which is called when the timestamp is set in the runtime. -#[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait OnTimestampSet { - /// Called when the timestamp is set. 
- fn on_timestamp_set(moment: Moment); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { - struct Test; - impl OnInitialize for Test { - fn on_initialize(_n: u8) -> crate::weights::Weight { - 10 - } - } - impl OnRuntimeUpgrade for Test { - fn on_runtime_upgrade() -> crate::weights::Weight { - 20 - } - } +pub use hooks::GenesisBuild; - assert_eq!(<(Test, Test)>::on_initialize(0), 20); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); - } +pub mod schedule; +mod storage; +pub use storage::{Instance, StorageInstance}; - #[test] - fn check_pallet_version_ordering() { - let version = PalletVersion::new(1, 0, 0); - assert!(version > PalletVersion::new(0, 1, 2)); - assert!(version == PalletVersion::new(1, 0, 0)); - assert!(version < PalletVersion::new(1, 0, 1)); - assert!(version < PalletVersion::new(1, 1, 0)); +mod dispatch; +pub use dispatch::{EnsureOrigin, OriginTrait, UnfilteredDispatchable}; - let version = PalletVersion::new(2, 50, 50); - assert!(version < PalletVersion::new(2, 50, 51)); - assert!(version > PalletVersion::new(2, 49, 51)); - assert!(version < PalletVersion::new(3, 49, 51)); - } -} +mod voting; +pub use voting::{CurrencyToVote, SaturatingCurrencyToVote, U128CurrencyToVote}; diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs new file mode 100644 index 0000000000000..6174238e35537 --- /dev/null +++ b/frame/support/src/traits/dispatch.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with dispatching calls and the origin from which they are dispatched. + +use crate::dispatch::DispatchResultWithPostInfo; +use sp_runtime::traits::BadOrigin; + +/// Some sort of check on the origin is performed by this object. +pub trait EnsureOrigin { + /// A return type. + type Success; + /// Perform the origin check. + fn ensure_origin(o: OuterOrigin) -> Result { + Self::try_origin(o).map_err(|_| BadOrigin) + } + /// Perform the origin check. + fn try_origin(o: OuterOrigin) -> Result; + + /// Returns an outer origin capable of passing `try_origin` check. + /// + /// ** Should be used for benchmarking only!!! ** + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> OuterOrigin; +} + +/// Type that can be dispatched with an origin but without checking the origin filter. +/// +/// Implemented for pallet dispatchable type by `decl_module` and for runtime dispatchable by +/// `construct_runtime` and `impl_outer_dispatch`. +pub trait UnfilteredDispatchable { + /// The origin type of the runtime, (i.e. `frame_system::Config::Origin`). + type Origin; + + /// Dispatch this call but do not check the filter in origin. + fn dispatch_bypass_filter(self, origin: Self::Origin) -> DispatchResultWithPostInfo; +} + +/// Methods available on `frame_system::Config::Origin`. +pub trait OriginTrait: Sized { + /// Runtime call type, as in `frame_system::Config::Call` + type Call; + + /// The caller origin, overarching type of all pallets origins. + type PalletsOrigin; + + /// The AccountId used across the system. 
+ type AccountId; + + /// Add a filter to the origin. + fn add_filter(&mut self, filter: impl Fn(&Self::Call) -> bool + 'static); + + /// Reset origin filters to default one, i.e `frame_system::Config::BaseCallFilter`. + fn reset_filter(&mut self); + + /// Replace the caller with caller from the other origin + fn set_caller_from(&mut self, other: impl Into); + + /// Filter the call, if false then call is filtered out. + fn filter_call(&self, call: &Self::Call) -> bool; + + /// Get the caller. + fn caller(&self) -> &Self::PalletsOrigin; + + /// Do something with the caller, consuming self but returning it if the caller was unused. + fn try_with_caller( + self, + f: impl FnOnce(Self::PalletsOrigin) -> Result, + ) -> Result; + + /// Create with system none origin and `frame-system::Config::BaseCallFilter`. + fn none() -> Self; + + /// Create with system root origin and no filter. + fn root() -> Self; + + /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + fn signed(by: Self::AccountId) -> Self; +} diff --git a/frame/support/src/traits/filter.rs b/frame/support/src/traits/filter.rs new file mode 100644 index 0000000000000..f884a8ece72e5 --- /dev/null +++ b/frame/support/src/traits/filter.rs @@ -0,0 +1,282 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Traits and associated utilities for dealing with abstract constraint filters. + +use sp_std::marker::PhantomData; + +/// Simple trait for providing a filter over a reference to some type. +pub trait Filter { + /// Determine if a given value should be allowed through the filter (returns `true`) or not. + fn filter(_: &T) -> bool; +} + +impl Filter for () { + fn filter(_: &T) -> bool { true } +} + +/// Trait to add a constraint onto the filter. +pub trait FilterStack: Filter { + /// The type used to archive the stack. + type Stack; + + /// Add a new `constraint` onto the filter. + fn push(constraint: impl Fn(&T) -> bool + 'static); + + /// Removes the most recently pushed, and not-yet-popped, constraint from the filter. + fn pop(); + + /// Clear the filter, returning a value that may be used later to `restore` it. + fn take() -> Self::Stack; + + /// Restore the filter from a previous `take` operation. + fn restore(taken: Self::Stack); +} + +/// Guard type for pushing a constraint to a `FilterStack` and popping when dropped. +pub struct FilterStackGuard, T>(PhantomData<(F, T)>); + +/// Guard type for clearing all pushed constraints from a `FilterStack` and reinstating them when +/// dropped. +pub struct ClearFilterGuard, T>(Option, PhantomData); + +impl, T> FilterStackGuard { + /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when + /// this instance is dropped. + pub fn new(constraint: impl Fn(&T) -> bool + 'static) -> Self { + F::push(constraint); + Self(PhantomData) + } +} + +impl, T> Drop for FilterStackGuard { + fn drop(&mut self) { + F::pop(); + } +} + +impl, T> ClearFilterGuard { + /// Create a new instance, adding a new `constraint` onto the filter `T`, and popping it when + /// this instance is dropped. 
+ pub fn new() -> Self { + Self(Some(F::take()), PhantomData) + } +} + +impl, T> Drop for ClearFilterGuard { + fn drop(&mut self) { + if let Some(taken) = self.0.take() { + F::restore(taken); + } + } +} + +/// Simple trait for providing a filter over a reference to some type, given an instance of itself. +pub trait InstanceFilter: Sized + Send + Sync { + /// Determine if a given value should be allowed through the filter (returns `true`) or not. + fn filter(&self, _: &T) -> bool; + + /// Determines whether `self` matches at least everything that `_o` does. + fn is_superset(&self, _o: &Self) -> bool { false } +} + +impl InstanceFilter for () { + fn filter(&self, _: &T) -> bool { true } + fn is_superset(&self, _o: &Self) -> bool { true } +} + +/// Re-expected for the macro. +#[doc(hidden)] +pub use sp_std::{mem::{swap, take}, cell::RefCell, vec::Vec, boxed::Box}; + +#[macro_export] +macro_rules! impl_filter_stack { + ($target:ty, $base:ty, $call:ty, $module:ident) => { + #[cfg(feature = "std")] + mod $module { + #[allow(unused_imports)] + use super::*; + use $crate::traits::filter::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; + + thread_local! 
{ + static FILTER: RefCell bool + 'static>>> = RefCell::new(Vec::new()); + } + + impl Filter<$call> for $target { + fn filter(call: &$call) -> bool { + <$base>::filter(call) && + FILTER.with(|filter| filter.borrow().iter().all(|f| f(call))) + } + } + + impl FilterStack<$call> for $target { + type Stack = Vec bool + 'static>>; + fn push(f: impl Fn(&$call) -> bool + 'static) { + FILTER.with(|filter| filter.borrow_mut().push(Box::new(f))); + } + fn pop() { + FILTER.with(|filter| filter.borrow_mut().pop()); + } + fn take() -> Self::Stack { + FILTER.with(|filter| take(filter.borrow_mut().as_mut())) + } + fn restore(mut s: Self::Stack) { + FILTER.with(|filter| swap(filter.borrow_mut().as_mut(), &mut s)); + } + } + } + + #[cfg(not(feature = "std"))] + mod $module { + #[allow(unused_imports)] + use super::*; + use $crate::traits::{swap, take, RefCell, Vec, Box, Filter, FilterStack}; + + struct ThisFilter(RefCell bool + 'static>>>); + // NOTE: Safe only in wasm (guarded above) because there's only one thread. + unsafe impl Send for ThisFilter {} + unsafe impl Sync for ThisFilter {} + + static FILTER: ThisFilter = ThisFilter(RefCell::new(Vec::new())); + + impl Filter<$call> for $target { + fn filter(call: &$call) -> bool { + <$base>::filter(call) && FILTER.0.borrow().iter().all(|f| f(call)) + } + } + + impl FilterStack<$call> for $target { + type Stack = Vec bool + 'static>>; + fn push(f: impl Fn(&$call) -> bool + 'static) { + FILTER.0.borrow_mut().push(Box::new(f)); + } + fn pop() { + FILTER.0.borrow_mut().pop(); + } + fn take() -> Self::Stack { + take(FILTER.0.borrow_mut().as_mut()) + } + fn restore(mut s: Self::Stack) { + swap(FILTER.0.borrow_mut().as_mut(), &mut s); + } + } + } + } +} + +/// Type that provide some integrity tests. +/// +/// This implemented for modules by `decl_module`. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait IntegrityTest { + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. 
+ fn integrity_test() {} +} + +#[cfg(test)] +pub mod test_impl_filter_stack { + use super::*; + + pub struct IsCallable; + pub struct BaseFilter; + impl Filter for BaseFilter { + fn filter(x: &u32) -> bool { x % 2 == 0 } + } + impl_filter_stack!( + crate::traits::filter::test_impl_filter_stack::IsCallable, + crate::traits::filter::test_impl_filter_stack::BaseFilter, + u32, + is_callable + ); + + #[test] + fn impl_filter_stack_should_work() { + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + + IsCallable::push(|x| *x < 42); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + + IsCallable::push(|x| *x % 3 == 0); + assert!(IsCallable::filter(&36)); + assert!(!IsCallable::filter(&40)); + + IsCallable::pop(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + + let saved = IsCallable::take(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + + IsCallable::restore(saved); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + + IsCallable::pop(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + } + + #[test] + fn guards_should_work() { + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + { + let _guard_1 = FilterStackGuard::::new(|x| *x < 42); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + { + let _guard_2 = FilterStackGuard::::new(|x| *x % 3 == 0); + assert!(IsCallable::filter(&36)); + assert!(!IsCallable::filter(&40)); + } + 
assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + { + let _guard_2 = ClearFilterGuard::::new(); + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + } + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(!IsCallable::filter(&42)); + } + assert!(IsCallable::filter(&36)); + assert!(IsCallable::filter(&40)); + assert!(IsCallable::filter(&42)); + assert!(!IsCallable::filter(&43)); + } +} diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs new file mode 100644 index 0000000000000..5f7b35a9ad25c --- /dev/null +++ b/frame/support/src/traits/hooks.rs @@ -0,0 +1,349 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for hooking tasks to events in a blockchain's lifecycle. + +use sp_arithmetic::traits::Saturating; +use sp_runtime::traits::MaybeSerializeDeserialize; +use impl_trait_for_tuples::impl_for_tuples; + +/// The block initialization trait. +/// +/// Implementing this lets you express what should happen for your pallet when the block is +/// beginning (right before the first extrinsic is executed). +pub trait OnInitialize { + /// The block is being initialized. Implement to have something happen. 
+ /// + /// Return the non-negotiable weight consumed in the block. + /// + /// NOTE: This function is called BEFORE ANY extrinsic in a block is applied, + /// including inherent extrinsics. Hence for instance, if you runtime includes + /// `pallet_timestamp`, the `timestamp` is not yet up to date at this point. + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } +} + +#[impl_for_tuples(30)] +impl OnInitialize for Tuple { + fn on_initialize(n: BlockNumber) -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(n.clone())); )* ); + weight + } +} + +/// The block finalization trait. +/// +/// Implementing this lets you express what should happen for your pallet when the block is ending. +#[impl_for_tuples(30)] +pub trait OnFinalize { + /// The block is being finalized. Implement to have something happen. + /// + /// NOTE: This function is called AFTER ALL extrinsics in a block are applied, + /// including inherent extrinsics. + fn on_finalize(_n: BlockNumber) {} +} + +/// The block's on idle trait. +/// +/// Implementing this lets you express what should happen for your pallet before +/// block finalization (see `on_finalize` hook) in case any remaining weight is left. +pub trait OnIdle { + /// The block is being finalized. + /// Implement to have something happen in case there is leftover weight. + /// Check the passed `remaining_weight` to make sure it is high enough to allow for + /// your pallet's extra computation. + /// + /// NOTE: This function is called AFTER ALL extrinsics - including inherent extrinsics - + /// in a block are applied but before `on_finalize` is executed. 
+ fn on_idle( + _n: BlockNumber, + _remaining_weight: crate::weights::Weight + ) -> crate::weights::Weight { + 0 + } +} + +#[impl_for_tuples(30)] +impl OnIdle for Tuple { + fn on_idle(n: BlockNumber, remaining_weight: crate::weights::Weight) -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( + let adjusted_remaining_weight = remaining_weight.saturating_sub(weight); + weight = weight.saturating_add(Tuple::on_idle(n.clone(), adjusted_remaining_weight)); + )* ); + weight + } +} + +/// A trait that will be called at genesis. +/// +/// Implementing this trait for a pallet let's you express operations that should +/// happen at genesis. It will be called in an externalities provided environment and +/// will see the genesis state after all pallets have written their genesis state. +#[impl_for_tuples(30)] +pub trait OnGenesis { + /// Something that should happen at genesis. + fn on_genesis() {} +} + +/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgradeHelpersExt::storage_key`]. +#[cfg(feature = "try-runtime")] +pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; + +/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. +#[cfg(feature = "try-runtime")] +pub trait OnRuntimeUpgradeHelpersExt { + /// Generate a storage key unique to this runtime upgrade. + /// + /// This can be used to communicate data from pre-upgrade to post-upgrade state and check + /// them. See [`Self::set_temp_storage`] and [`Self::get_temp_storage`]. + #[cfg(feature = "try-runtime")] + fn storage_key(ident: &str) -> [u8; 32] { + let prefix = sp_io::hashing::twox_128(ON_RUNTIME_UPGRADE_PREFIX); + let ident = sp_io::hashing::twox_128(ident.as_bytes()); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&prefix); + final_key[16..].copy_from_slice(&ident); + + final_key + } + + /// Get temporary storage data written by [`Self::set_temp_storage`]. 
+ /// + /// Returns `None` if either the data is unavailable or un-decodable. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being read from. + #[cfg(feature = "try-runtime")] + fn get_temp_storage(at: &str) -> Option { + sp_io::storage::get(&Self::storage_key(at)) + .and_then(|bytes| codec::Decode::decode(&mut &*bytes).ok()) + } + + /// Write some temporary data to a specific storage that can be read (potentially in + /// post-upgrade hook) via [`Self::get_temp_storage`]. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being written + /// to. + #[cfg(feature = "try-runtime")] + fn set_temp_storage(data: T, at: &str) { + sp_io::storage::set(&Self::storage_key(at), &data.encode()); + } +} + +#[cfg(feature = "try-runtime")] +impl OnRuntimeUpgradeHelpersExt for U {} + +/// The runtime upgrade trait. +/// +/// Implementing this lets you express what should happen when the runtime upgrades, +/// and changes may need to occur to your module. +pub trait OnRuntimeUpgrade { + /// Perform a module upgrade. + /// + /// # Warning + /// + /// This function will be called before we initialized any runtime state, aka `on_initialize` + /// wasn't called yet. So, information like the block number and any other + /// block local data are not accessible. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 + } + + /// Execute some pre-checks prior to a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { Ok(()) } + + /// Execute some post-checks after a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. 
+ #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { Ok(()) } +} + +#[impl_for_tuples(30)] +impl OnRuntimeUpgrade for Tuple { + fn on_runtime_upgrade() -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); + weight + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); + result + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); + result + } +} + +/// The pallet hooks trait. Implementing this lets you express some logic to execute. +pub trait Hooks { + /// The block is being finalized. Implement to have something happen. + fn on_finalize(_n: BlockNumber) {} + + /// This will be run when the block is being finalized (before `on_finalize`). + /// Implement to have something happen using the remaining weight. + /// Will not fire if the remaining weight is 0. + /// Return the weight used, the hook will subtract it from current weight used + /// and pass the result to the next `on_idle` hook if it exists. + fn on_idle( + _n: BlockNumber, + _remaining_weight: crate::weights::Weight + ) -> crate::weights::Weight { + 0 + } + + /// The block is being initialized. Implement to have something happen. + /// + /// Return the non-negotiable weight consumed in the block. + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } + + /// Perform a module upgrade. + /// + /// NOTE: this doesn't include all pallet logic triggered on runtime upgrade. For instance it + /// doesn't include the write of the pallet version in storage. The final complete logic + /// triggered on runtime upgrade is given by implementation of `OnRuntimeUpgrade` trait by + /// `Pallet`. 
+ /// + /// # Warning + /// + /// This function will be called before we initialized any runtime state, aka `on_initialize` + /// wasn't called yet. So, information like the block number and any other + /// block local data are not accessible. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + + /// Execute some pre-checks prior to a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + /// Execute some post-checks after a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + /// Implementing this function on a module allows you to perform long-running tasks + /// that make (by default) validators generate transactions that feed results + /// of those long-running computations back on chain. + /// + /// NOTE: This function runs off-chain, so it can access the block state, + /// but cannot preform any alterations. More specifically alterations are + /// not forbidden, but they are not persisted in any way after the worker + /// has finished. + /// + /// This function is being called after every block import (when fully synced). + /// + /// Implement this and use any of the `Offchain` `sp_io` set of APIs + /// to perform off-chain computations, calls and submit transactions + /// with results to trigger any on-chain changes. + /// Any state alterations are lost and are not persisted. + fn offchain_worker(_n: BlockNumber) {} + + /// Run integrity test. + /// + /// The test is not executed in a externalities provided environment. 
+ fn integrity_test() {} +} + +/// A trait to define the build function of a genesis config, T and I are placeholder for pallet +/// trait and pallet instance. +#[cfg(feature = "std")] +pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + /// The build function is called within an externalities allowing storage APIs. + /// Thus one can write to storage using regular pallet storages. + fn build(&self); + + /// Build the storage using `build` inside default storage. + fn build_storage(&self) -> Result { + let mut storage = Default::default(); + self.assimilate_storage(&mut storage)?; + Ok(storage) + } + + /// Assimilate the storage for this module into pre-existing overlays. + fn assimilate_storage(&self, storage: &mut sp_runtime::Storage) -> Result<(), String> { + sp_state_machine::BasicExternalities::execute_with_storage(storage, || { + self.build(); + Ok(()) + }) + } +} + +/// A trait which is called when the timestamp is set in the runtime. +#[impl_for_tuples(30)] +pub trait OnTimestampSet { + /// Called when the timestamp is set. 
+ fn on_timestamp_set(moment: Moment); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::traits::metadata::PalletVersion; + + #[test] + fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { + struct Test; + impl OnInitialize for Test { + fn on_initialize(_n: u8) -> crate::weights::Weight { + 10 + } + } + impl OnRuntimeUpgrade for Test { + fn on_runtime_upgrade() -> crate::weights::Weight { + 20 + } + } + + assert_eq!(<(Test, Test)>::on_initialize(0), 20); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); + } + + #[test] + fn check_pallet_version_ordering() { + let version = PalletVersion::new(1, 0, 0); + assert!(version > PalletVersion::new(0, 1, 2)); + assert!(version == PalletVersion::new(1, 0, 0)); + assert!(version < PalletVersion::new(1, 0, 1)); + assert!(version < PalletVersion::new(1, 1, 0)); + + let version = PalletVersion::new(2, 50, 50); + assert!(version < PalletVersion::new(2, 50, 51)); + assert!(version > PalletVersion::new(2, 49, 51)); + assert!(version < PalletVersion::new(3, 49, 51)); + } +} diff --git a/frame/support/src/traits/members.rs b/frame/support/src/traits/members.rs new file mode 100644 index 0000000000000..8b9c2c90f541d --- /dev/null +++ b/frame/support/src/traits/members.rs @@ -0,0 +1,209 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Traits for dealing with the idea of membership. + +use sp_std::{prelude::*, marker::PhantomData}; + +/// A trait for querying whether a type can be said to "contain" a value. +pub trait Contains { + /// Return `true` if this "contains" the given value `t`. + fn contains(t: &T) -> bool; +} + +/// A `Contains` implementation which always returns `true`. +pub struct All(PhantomData); +impl Contains for All { + fn contains(_: &T) -> bool { true } +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl Contains for Tuple { + fn contains(t: &T) -> bool { + for_tuples!( #( + if Tuple::contains(t) { return true } + )* ); + false + } +} + +/// Create a type which implements the `Contains` trait for a particular type with syntax similar +/// to `matches!`. +#[macro_export] +macro_rules! match_type { + ( pub type $n:ident: impl Contains<$t:ty> = { $phead:pat $( | $ptail:pat )* } ; ) => { + pub struct $n; + impl $crate::traits::Contains<$t> for $n { + fn contains(l: &$t) -> bool { + matches!(l, $phead $( | $ptail )* ) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + match_type! { + pub type OneOrTenToTwenty: impl Contains = { 1 | 10..=20 }; + } + + #[test] + fn match_type_works() { + for i in 0..=255 { + assert_eq!(OneOrTenToTwenty::contains(&i), i == 1 || i >= 10 && i <= 20); + } + } +} + +/// A trait for a set which can enumerate its members in order. +pub trait SortedMembers { + /// Get a vector of all members in the set, ordered. + fn sorted_members() -> Vec; + + /// Return `true` if this "contains" the given value `t`. + fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } + + /// Get the number of items in the set. + fn count() -> usize { Self::sorted_members().len() } + + /// Add an item that would satisfy `contains`. It does not make sure any other + /// state is correctly maintained or generated. 
+ /// + /// **Should be used for benchmarking only!!!** + #[cfg(feature = "runtime-benchmarks")] + fn add(_t: &T) { unimplemented!() } +} + +/// Adapter struct for turning an `OrderedMembership` impl into a `Contains` impl. +pub struct AsContains(PhantomData<(OM,)>); +impl> Contains for AsContains { + fn contains(t: &T) -> bool { OM::contains(t) } +} + +/// Trivial utility for implementing `Contains`/`OrderedMembership` with a `Vec`. +pub struct IsInVec(PhantomData); +impl>> Contains for IsInVec { + fn contains(t: &X) -> bool { T::get().contains(t) } +} +impl>> SortedMembers for IsInVec { + fn sorted_members() -> Vec { let mut r = T::get(); r.sort(); r } +} + +/// A trait for querying bound for the length of an implementation of `Contains` +pub trait ContainsLengthBound { + /// Minimum number of elements contained + fn min_len() -> usize; + /// Maximum number of elements contained + fn max_len() -> usize; +} + +/// Trait for type that can handle the initialization of account IDs at genesis. +pub trait InitializeMembers { + /// Initialize the members to the given `members`. + fn initialize_members(members: &[AccountId]); +} + +impl InitializeMembers for () { + fn initialize_members(_: &[T]) {} +} + +/// Trait for type that can handle incremental changes to a set of account IDs. +pub trait ChangeMembers { + /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The + /// new set is given by `new`, and need not be sorted. + /// + /// This resets any previous value of prime. + fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { + new.sort(); + Self::change_members_sorted(incoming, outgoing, &new[..]); + } + + /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The + /// new set is thus given by `sorted_new` and **must be sorted**. + /// + /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. 
+ /// + /// This resets any previous value of prime. + fn change_members_sorted( + incoming: &[AccountId], + outgoing: &[AccountId], + sorted_new: &[AccountId], + ); + + /// Set the new members; they **must already be sorted**. This will compute the diff and use it to + /// call `change_members_sorted`. + /// + /// This resets any previous value of prime. + fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { + let (incoming, outgoing) = Self::compute_members_diff_sorted(new_members, old_members); + Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); + } + + /// Compute diff between new and old members; they **must already be sorted**. + /// + /// Returns incoming and outgoing members. + fn compute_members_diff_sorted( + new_members: &[AccountId], + old_members: &[AccountId], + ) -> (Vec, Vec) { + let mut old_iter = old_members.iter(); + let mut new_iter = new_members.iter(); + let mut incoming = Vec::new(); + let mut outgoing = Vec::new(); + let mut old_i = old_iter.next(); + let mut new_i = new_iter.next(); + loop { + match (old_i, new_i) { + (None, None) => break, + (Some(old), Some(new)) if old == new => { + old_i = old_iter.next(); + new_i = new_iter.next(); + } + (Some(old), Some(new)) if old < new => { + outgoing.push(old.clone()); + old_i = old_iter.next(); + } + (Some(old), None) => { + outgoing.push(old.clone()); + old_i = old_iter.next(); + } + (_, Some(new)) => { + incoming.push(new.clone()); + new_i = new_iter.next(); + } + } + } + (incoming, outgoing) + } + + /// Set the prime member. + fn set_prime(_prime: Option) {} + + /// Get the current prime. 
+ fn get_prime() -> Option { + None + } +} + +impl ChangeMembers for () { + fn change_members(_: &[T], _: &[T], _: Vec) {} + fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} + fn set_members_sorted(_: &[T], _: &[T]) {} + fn set_prime(_: Option) {} +} diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs new file mode 100644 index 0000000000000..b13a0464b30c0 --- /dev/null +++ b/frame/support/src/traits/metadata.rs @@ -0,0 +1,178 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for managing information attached to pallets and their constituents. + +use codec::{Encode, Decode}; +use sp_runtime::RuntimeDebug; + +/// Provides information about the pallet setup in the runtime. +/// +/// An implementor should be able to provide information about each pallet that +/// is configured in `construct_runtime!`. +pub trait PalletInfo { + /// Convert the given pallet `P` into its index as configured in the runtime. + fn index() -> Option; + /// Convert the given pallet `P` into its name as configured in the runtime. + fn name() -> Option<&'static str>; +} + +/// Provides information about the pallet setup in the runtime. +/// +/// Access the information provided by [`PalletInfo`] for a specific pallet. 
+pub trait PalletInfoAccess { + /// Index of the pallet as configured in the runtime. + fn index() -> usize; + /// Name of the pallet as configured in the runtime. + fn name() -> &'static str; +} + +/// The function and pallet name of the Call. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] +pub struct CallMetadata { + /// Name of the function. + pub function_name: &'static str, + /// Name of the pallet to which the function belongs. + pub pallet_name: &'static str, +} + +/// Gets the function name of the Call. +pub trait GetCallName { + /// Return all function names. + fn get_call_names() -> &'static [&'static str]; + /// Return the function name of the Call. + fn get_call_name(&self) -> &'static str; +} + +/// Gets the metadata for the Call - function name and pallet name. +pub trait GetCallMetadata { + /// Return all module names. + fn get_module_names() -> &'static [&'static str]; + /// Return all function names for the given `module`. + fn get_call_names(module: &str) -> &'static [&'static str]; + /// Return a [`CallMetadata`], containing function and pallet name of the Call. + fn get_call_metadata(&self) -> CallMetadata; +} + +/// The storage key postfix that is used to store the [`PalletVersion`] per pallet. +/// +/// The full storage key is built by using: +/// Twox128([`PalletInfo::name`]) ++ Twox128([`PALLET_VERSION_STORAGE_KEY_POSTFIX`]) +pub const PALLET_VERSION_STORAGE_KEY_POSTFIX: &[u8] = b":__PALLET_VERSION__:"; + +/// The version of a pallet. +/// +/// Each pallet version is stored in the state under a fixed key. See +/// [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] for how this key is built. +#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Ord, Clone, Copy)] +pub struct PalletVersion { + /// The major version of the pallet. + pub major: u16, + /// The minor version of the pallet. + pub minor: u8, + /// The patch version of the pallet. + pub patch: u8, +} + +impl PalletVersion { + /// Creates a new instance of `Self`. 
+ pub fn new(major: u16, minor: u8, patch: u8) -> Self { + Self { + major, + minor, + patch, + } + } + + /// Returns the storage key for a pallet version. + /// + /// See [`PALLET_VERSION_STORAGE_KEY_POSTFIX`] on how this key is built. + /// + /// Returns `None` if the given `PI` returned a `None` as name for the given + /// `Pallet`. + pub fn storage_key() -> Option<[u8; 32]> { + let pallet_name = PI::name::()?; + + let pallet_name = sp_io::hashing::twox_128(pallet_name.as_bytes()); + let postfix = sp_io::hashing::twox_128(PALLET_VERSION_STORAGE_KEY_POSTFIX); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&pallet_name); + final_key[16..].copy_from_slice(&postfix); + + Some(final_key) + } + + /// Put this pallet version into the storage. + /// + /// It will use the storage key that is associated with the given `Pallet`. + /// + /// # Panics + /// + /// This function will panic iff `Pallet` can not be found by `PalletInfo`. + /// In a runtime that is put together using + /// [`construct_runtime!`](crate::construct_runtime) this should never happen. + /// + /// It will also panic if this function isn't executed in an externalities + /// provided environment. + pub fn put_into_storage(&self) { + let key = Self::storage_key::() + .expect("Every active pallet has a name in the runtime; qed"); + + crate::storage::unhashed::put(&key, self); + } +} + +impl sp_std::cmp::PartialOrd for PalletVersion { + fn partial_cmp(&self, other: &Self) -> Option { + let res = self.major + .cmp(&other.major) + .then_with(|| + self.minor + .cmp(&other.minor) + .then_with(|| self.patch.cmp(&other.patch) + )); + + Some(res) + } +} + +/// Provides version information about a pallet. +/// +/// This trait provides two functions for returning the version of a +/// pallet. There is a state where both functions can return distinct versions. +/// See [`GetPalletVersion::storage_version`] for more information about this. 
+pub trait GetPalletVersion { 
+ /// Returns the current version of the pallet. 
+ fn current_version() -> PalletVersion; 
+ 
+ /// Returns the version of the pallet that is stored in storage. 
+ /// 
+ /// Most of the time this will return the exact same version as 
+ /// [`GetPalletVersion::current_version`]. Only when being in 
+ /// a state after a runtime upgrade happened and the pallet has 
+ /// not yet updated its version in storage, this will return a 
+ /// different (the previous, seen from the time of calling) version. 
+ /// 
+ /// See [`PalletVersion`] for more information. 
+ /// 
+ /// # Note 
+ /// 
+ /// If there was no previous version of the pallet stored in the state, 
+ /// this function returns `None`. 
+ fn storage_version() -> Option; 
+} 
diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs 
new file mode 100644 
index 0000000000000..d3010358dd883 
--- /dev/null 
+++ b/frame/support/src/traits/misc.rs 
@@ -0,0 +1,323 @@ 
+// This file is part of Substrate. 
+ 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 
+ 
+// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at 
+// 
+// http://www.apache.org/licenses/LICENSE-2.0 
+// 
+// Unless required by applicable law or agreed to in writing, software 
+// distributed under the License is distributed on an "AS IS" BASIS, 
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and 
+// limitations under the License. 
+ 
+//! Smaller traits used in FRAME which don't need their own file. 
+ 
+use sp_runtime::traits::{StoredMapError, Block as BlockT}; 
+use sp_arithmetic::traits::AtLeast32Bit; 
+use crate::dispatch::Parameter; 
+ 
+/// Anything that can have a `::len()` method. 
+pub trait Len { 
+ /// Return the length of data type. 
+ fn len(&self) -> usize; +} + +impl Len for T where ::IntoIter: ExactSizeIterator { + fn len(&self) -> usize { + self.clone().into_iter().len() + } +} + +/// A trait for querying a single value from a type. +/// +/// It is not required that the value is constant. +pub trait Get { + /// Return the current value. + fn get() -> T; +} + +impl Get for () { + fn get() -> T { T::default() } +} + +/// Implement Get by returning Default for any type that implements Default. +pub struct GetDefault; +impl Get for GetDefault { + fn get() -> T { + T::default() + } +} + +/// A type for which some values make sense to be able to drop without further consideration. +pub trait TryDrop: Sized { + /// Drop an instance cleanly. Only works if its value represents "no-operation". + fn try_drop(self) -> Result<(), Self>; +} + +/// Return type used when we need to return one of two items, each of the opposite direction or +/// sign, with one (`Same`) being of the same type as the `self` or primary argument of the function +/// that returned it. +pub enum SameOrOther { + /// No item. + None, + /// An item of the same type as the `Self` on which the return function was called. + Same(A), + /// An item of the opposite type to the `Self` on which the return function was called. + Other(B), +} + +impl TryDrop for SameOrOther { + fn try_drop(self) -> Result<(), Self> { + if let SameOrOther::None = self { + Ok(()) + } else { + Err(self) + } + } +} + +impl SameOrOther { + /// Returns `Ok` with the inner value of `Same` if `self` is that, otherwise returns `Err` with + /// `self`. + pub fn try_same(self) -> Result { + match self { + SameOrOther::Same(a) => Ok(a), + x => Err(x), + } + } + + /// Returns `Ok` with the inner value of `Other` if `self` is that, otherwise returns `Err` with + /// `self`. + pub fn try_other(self) -> Result { + match self { + SameOrOther::Other(b) => Ok(b), + x => Err(x), + } + } + + /// Returns `Ok` if `self` is `None`, otherwise returns `Err` with `self`. 
+ pub fn try_none(self) -> Result<(), Self> { + match self { + SameOrOther::None => Ok(()), + x => Err(x), + } + } + + pub fn same(self) -> Result where A: Default { + match self { + SameOrOther::Same(a) => Ok(a), + SameOrOther::None => Ok(A::default()), + SameOrOther::Other(b) => Err(b), + } + } + + pub fn other(self) -> Result where B: Default { + match self { + SameOrOther::Same(a) => Err(a), + SameOrOther::None => Ok(B::default()), + SameOrOther::Other(b) => Ok(b), + } + } +} + +/// Handler for when a new account has been created. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait OnNewAccount { + /// A new account `who` has been registered. + fn on_new_account(who: &AccountId); +} + +/// The account with the given id was reaped. +#[impl_trait_for_tuples::impl_for_tuples(30)] +pub trait OnKilledAccount { + /// The account with the given id was reaped. + fn on_killed_account(who: &AccountId); +} + +/// A simple, generic one-parameter event notifier/handler. +pub trait HandleLifetime { + /// An account was created. + fn created(_t: &T) -> Result<(), StoredMapError> { Ok(()) } + + /// An account was killed. + fn killed(_t: &T) -> Result<(), StoredMapError> { Ok(()) } +} + +impl HandleLifetime for () {} + +pub trait Time { + type Moment: AtLeast32Bit + Parameter + Default + Copy; + + fn now() -> Self::Moment; +} + +/// Trait to deal with unix time. +pub trait UnixTime { + /// Return duration since `SystemTime::UNIX_EPOCH`. + fn now() -> core::time::Duration; +} + +/// Trait to be used when types are exactly same. +/// +/// This allow to convert back and forth from type, a reference and a mutable reference. +pub trait IsType: Into + From { + /// Cast reference. + fn from_ref(t: &T) -> &Self; + + /// Cast reference. + fn into_ref(&self) -> &T; + + /// Cast mutable reference. + fn from_mut(t: &mut T) -> &mut Self; + + /// Cast mutable reference. 
+ fn into_mut(&mut self) -> &mut T; +} + +impl IsType for T { + fn from_ref(t: &T) -> &Self { t } + fn into_ref(&self) -> &T { self } + fn from_mut(t: &mut T) -> &mut Self { t } + fn into_mut(&mut self) -> &mut T { self } +} + +/// Something that can be checked to be a of sub type `T`. +/// +/// This is useful for enums where each variant encapsulates a different sub type, and +/// you need access to these sub types. +/// +/// For example, in FRAME, this trait is implemented for the runtime `Call` enum. Pallets use this +/// to check if a certain call is an instance of the local pallet's `Call` enum. +/// +/// # Example +/// +/// ``` +/// # use frame_support::traits::IsSubType; +/// +/// enum Test { +/// String(String), +/// U32(u32), +/// } +/// +/// impl IsSubType for Test { +/// fn is_sub_type(&self) -> Option<&String> { +/// match self { +/// Self::String(ref r) => Some(r), +/// _ => None, +/// } +/// } +/// } +/// +/// impl IsSubType for Test { +/// fn is_sub_type(&self) -> Option<&u32> { +/// match self { +/// Self::U32(ref r) => Some(r), +/// _ => None, +/// } +/// } +/// } +/// +/// fn main() { +/// let data = Test::String("test".into()); +/// +/// assert_eq!("test", IsSubType::::is_sub_type(&data).unwrap().as_str()); +/// } +/// ``` +pub trait IsSubType { + /// Returns `Some(_)` if `self` is an instance of sub type `T`. + fn is_sub_type(&self) -> Option<&T>; +} + +/// Something that can execute a given block. +/// +/// Executing a block means that all extrinsics in a given block will be executed and the resulting +/// header will be checked against the header of the given block. +pub trait ExecuteBlock { + /// Execute the given `block`. + /// + /// This will execute all extrinsics in the block and check that the resulting header is correct. + /// + /// # Panic + /// + /// Panics when an extrinsics panics or the resulting header doesn't match the expected header. + fn execute_block(block: Block); +} + +/// Off-chain computation trait. 
+/// 
+/// Implementing this trait on a module allows you to perform long-running tasks 
+/// that make (by default) validators generate transactions that feed results 
+/// of those long-running computations back on chain. 
+/// 
+/// NOTE: This function runs off-chain, so it can access the block state, 
+/// but cannot perform any alterations. More specifically alterations are 
+/// not forbidden, but they are not persisted in any way after the worker 
+/// has finished. 
+#[impl_trait_for_tuples::impl_for_tuples(30)] 
+pub trait OffchainWorker { 
+ /// This function is being called after every block import (when fully synced). 
+ /// 
+ /// Implement this and use any of the `Offchain` `sp_io` set of APIs 
+ /// to perform off-chain computations, calls and submit transactions 
+ /// with results to trigger any on-chain changes. 
+ /// Any state alterations are lost and are not persisted. 
+ fn offchain_worker(_n: BlockNumber) {} 
+} 
+ 
+/// Some amount of backing from a group. The precise definition of what it means to "back" something 
+/// is left flexible. 
+pub struct Backing { 
+ /// The number of members of the group that back some motion. 
+ pub approvals: u32, 
+ /// The total count of group members. 
+ pub eligible: u32, 
+} 
+ 
+/// Retrieve the backing from an object's ref. 
+pub trait GetBacking { 
+ /// Returns `Some` `Backing` if `self` represents a fractional/groupwise backing of some 
+ /// implicit motion. `None` if it does not. 
+ fn get_backing(&self) -> Option; 
+} 
+ 
+ 
+ 
+/// A trait to ensure the inherents are before non-inherents in a block. 
+/// 
+/// This is typically implemented on runtime, through `construct_runtime!`. 
+pub trait EnsureInherentsAreFirst { 
+ /// Ensure the position of inherents is correct, i.e. they are before non-inherents. 
+ /// 
+ /// On error return the index of the inherent with invalid position (counting from 0). 
+ fn ensure_inherents_are_first(block: &Block) -> Result<(), u32>; 
+} 
+ 
+/// An extrinsic on which we can get access to call. 
+pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { + /// Get the call of the extrinsic. + fn call(&self) -> &Self::Call; +} + +#[cfg(feature = "std")] +impl ExtrinsicCall for sp_runtime::testing::TestXt where + Call: codec::Codec + Sync + Send, +{ + fn call(&self) -> &Self::Call { + &self.call + } +} + +impl ExtrinsicCall +for sp_runtime::generic::UncheckedExtrinsic +where + Extra: sp_runtime::traits::SignedExtension, +{ + fn call(&self) -> &Self::Call { + &self.function + } +} diff --git a/frame/support/src/traits/randomness.rs b/frame/support/src/traits/randomness.rs new file mode 100644 index 0000000000000..865893f99b393 --- /dev/null +++ b/frame/support/src/traits/randomness.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for dealing with on-chain randomness. + +/// A trait that is able to provide randomness. +/// +/// Being a deterministic blockchain, real randomness is difficult to come by, different +/// implementations of this trait will provide different security guarantees. At best, +/// this will be randomness which was hard to predict a long time ago, but that has become +/// easy to predict recently. +pub trait Randomness { + /// Get the most recently determined random seed, along with the time in the past + /// since when it was determinable by chain observers. 
+ /// + /// `subject` is a context identifier and allows you to get a different result to + /// other callers of this function; use it like `random(&b"my context"[..])`. + /// + /// NOTE: The returned seed should only be used to distinguish commitments made before + /// the returned block number. If the block number is too early (i.e. commitments were + /// made afterwards), then ensure no further commitments may be made and repeatedly + /// call this on later blocks until the block number returned is later than the latest + /// commitment. + fn random(subject: &[u8]) -> (Output, BlockNumber); + + /// Get the basic random seed. + /// + /// In general you won't want to use this, but rather `Self::random` which allows + /// you to give a subject for the random result and whose value will be + /// independently low-influence random from any other such seeds. + /// + /// NOTE: The returned seed should only be used to distinguish commitments made before + /// the returned block number. If the block number is too early (i.e. commitments were + /// made afterwards), then ensure no further commitments may be made and repeatedly + /// call this on later blocks until the block number returned is later than the latest + /// commitment. + fn random_seed() -> (Output, BlockNumber) { + Self::random(&[][..]) + } +} diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs new file mode 100644 index 0000000000000..58e4c419f2813 --- /dev/null +++ b/frame/support/src/traits/schedule.rs @@ -0,0 +1,133 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated utilities for scheduling dispatchables in FRAME. + +use sp_std::{prelude::*, fmt::Debug}; +use codec::{Encode, Decode, Codec, EncodeLike}; +use sp_runtime::{RuntimeDebug, DispatchError}; + +/// Information relating to the period of a scheduled task. First item is the length of the +/// period and the second is the number of times it should be executed in total before the task +/// is considered finished and removed. +pub type Period = (BlockNumber, u32); + +/// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning +/// higher priority. +pub type Priority = u8; + +/// The dispatch time of a scheduled task. +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum DispatchTime { + /// At specified block. + At(BlockNumber), + /// After specified number of blocks. + After(BlockNumber), +} + +/// The highest priority. We invert the value so that normal sorting will place the highest +/// priority at the beginning of the list. +pub const HIGHEST_PRIORITY: Priority = 0; +/// Anything of this value or lower will definitely be scheduled on the block that they ask for, even +/// if it breaches the `MaximumWeight` limitation. +pub const HARD_DEADLINE: Priority = 63; +/// The lowest priority. Most stuff should be around here. +pub const LOWEST_PRIORITY: Priority = 255; + +/// A type that can be used as a scheduler. +pub trait Anon { + /// An address which can be used for removing a scheduled task. 
+ type Address: Codec + Clone + Eq + EncodeLike + Debug; + + /// Schedule a dispatch to happen at the beginning of some block in the future. + /// + /// This is not named. + fn schedule( + when: DispatchTime, + maybe_periodic: Option>, + priority: Priority, + origin: Origin, + call: Call + ) -> Result; + + /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, + /// also. + /// + /// Will return an error if the `address` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + /// + /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For + /// that, you must name the task explicitly using the `Named` trait. + fn cancel(address: Self::Address) -> Result<(), ()>; + + /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed + /// only if it is executed *before* the currently scheduled block. For periodic tasks, + /// this dispatch is guaranteed to succeed only before the *initial* execution; for + /// others, use `reschedule_named`. + /// + /// Will return an error if the `address` is invalid. + fn reschedule( + address: Self::Address, + when: DispatchTime, + ) -> Result; + + /// Return the next dispatch time for a given task. + /// + /// Will return an error if the `address` is invalid. + fn next_dispatch_time(address: Self::Address) -> Result; +} + +/// A type that can be used as a scheduler. +pub trait Named { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + + /// Schedule a dispatch to happen at the beginning of some block in the future. + /// + /// - `id`: The identity of the task. This must be unique and will return an error if not. 
+ fn schedule_named( + id: Vec, + when: DispatchTime, + maybe_periodic: Option>, + priority: Priority, + origin: Origin, + call: Call + ) -> Result; + + /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances + /// of that, also. + /// + /// Will return an error if the `id` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + fn cancel_named(id: Vec) -> Result<(), ()>; + + /// Reschedule a task. For one-off tasks, this dispatch is guaranteed to succeed + /// only if it is executed *before* the currently scheduled block. + fn reschedule_named( + id: Vec, + when: DispatchTime, + ) -> Result; + + /// Return the next dispatch time for a given task. + /// + /// Will return an error if the `id` is invalid. + fn next_dispatch_time(id: Vec) -> Result; +} diff --git a/frame/support/src/traits/storage.rs b/frame/support/src/traits/storage.rs new file mode 100644 index 0000000000000..c42e1abf73ea3 --- /dev/null +++ b/frame/support/src/traits/storage.rs @@ -0,0 +1,47 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for encoding data related to pallet's storage items. + +/// An instance of a pallet in the storage. 
+/// 
+/// It is required that these instances are unique, to support multiple instances per pallet in the same runtime! 
+/// 
+/// E.g. for module MyModule default instance will have prefix "MyModule" and other instances 
+/// "InstanceNMyModule". 
+pub trait Instance: 'static { 
+ /// Unique module prefix. E.g. "InstanceNMyModule" or "MyModule" 
+ const PREFIX: &'static str; 
+ /// Unique numerical identifier for an instance. 
+ const INDEX: u8; 
+} 
+ 
+/// An instance of a storage in a pallet. 
+/// 
+/// Define an instance for an individual storage inside a pallet. 
+/// The pallet prefix is used to isolate the storage between pallets, and the storage prefix is 
+/// used to isolate storages inside a pallet. 
+/// 
+/// NOTE: This information can be used to define storages in pallet such as a `StorageMap` which 
+/// can use keys after `twox_128(pallet_prefix())++twox_128(STORAGE_PREFIX)` 
+pub trait StorageInstance { 
+ /// Prefix of a pallet to isolate it from other pallets. 
+ fn pallet_prefix() -> &'static str; 
+ 
+ /// Prefix given to a storage to isolate from other storages in the pallet. 
+ const STORAGE_PREFIX: &'static str; 
+} 
diff --git a/frame/support/src/traits/stored_map.rs b/frame/support/src/traits/stored_map.rs 
new file mode 100644 
index 0000000000000..10964541ab32b 
--- /dev/null 
+++ b/frame/support/src/traits/stored_map.rs 
@@ -0,0 +1,141 @@ 
+// This file is part of Substrate. 
+ 
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 
+ 
+// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at 
+// 
+// http://www.apache.org/licenses/LICENSE-2.0 
+// 
+// Unless required by applicable law or agreed to in writing, software 
+// distributed under the License is distributed on an "AS IS" BASIS, 
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated datatypes for managing abstract stored values. + +use codec::FullCodec; +use sp_runtime::traits::StoredMapError; +use crate::storage::StorageMap; +use crate::traits::misc::HandleLifetime; + +/// An abstraction of a value stored within storage, but possibly as part of a larger composite +/// item. +pub trait StoredMap { + /// Get the item, or its default if it doesn't yet exist; we make no distinction between the + /// two. + fn get(k: &K) -> T; + + /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is + /// returned. It is removed or reset to default value if it has been mutated to `None` + fn try_mutate_exists>( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result; + + // Everything past here has a default implementation. + + /// Mutate the item. + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { + Self::mutate_exists(k, |maybe_account| match maybe_account { + Some(ref mut account) => f(account), + x @ None => { + let mut account = Default::default(); + let r = f(&mut account); + *x = Some(account); + r + } + }) + } + + /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. + /// + /// This is infallible as long as the value does not get destroyed. + fn mutate_exists( + k: &K, + f: impl FnOnce(&mut Option) -> R, + ) -> Result { + Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) + } + + /// Set the item to something new. + fn insert(k: &K, t: T) -> Result<(), StoredMapError> { Self::mutate(k, |i| *i = t) } + + /// Remove the item or otherwise replace it with its default value; we don't care which. + fn remove(k: &K) -> Result<(), StoredMapError> { Self::mutate_exists(k, |x| *x = None) } +} + +/// A shim for placing around a storage item in order to use it as a `StoredValue`. 
Ideally this +/// wouldn't be needed as `StorageValue`s should blanket implement `StoredValue`s, however this +/// would break the ability to have custom impls of `StoredValue`. The other workaround is to +/// implement it directly in the macro. +/// +/// This form has the advantage that two additional types are provides, `Created` and `Removed`, +/// which are both generic events that can be tied to handlers to do something in the case of being +/// about to create an account where one didn't previously exist (at all; not just where it used to +/// be the default value), or where the account is being removed or reset back to the default value +/// where previously it did exist (though may have been in a default state). This works well with +/// system module's `CallOnCreatedAccount` and `CallKillAccount`. +pub struct StorageMapShim(sp_std::marker::PhantomData<(S, L, K, T)>); +impl< + S: StorageMap, + L: HandleLifetime, + K: FullCodec, + T: FullCodec + Default, +> StoredMap for StorageMapShim { + fn get(k: &K) -> T { S::get(k) } + fn insert(k: &K, t: T) -> Result<(), StoredMapError> { + if !S::contains_key(&k) { + L::created(k)?; + } + S::insert(k, t); + Ok(()) + } + fn remove(k: &K) -> Result<(), StoredMapError> { + if S::contains_key(&k) { + L::killed(&k)?; + S::remove(k); + } + Ok(()) + } + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> Result { + if !S::contains_key(&k) { + L::created(k)?; + } + Ok(S::mutate(k, f)) + } + fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> Result { + S::try_mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + let r = f(maybe_value); + let exists = maybe_value.is_some(); + + if !existed && exists { + L::created(k)?; + } else if existed && !exists { + L::killed(k)?; + } + Ok(r) + }) + } + fn try_mutate_exists>( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + S::try_mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + let r = f(maybe_value)?; + let exists = 
maybe_value.is_some(); + + if !existed && exists { + L::created(k).map_err(E::from)?; + } else if existed && !exists { + L::killed(k).map_err(E::from)?; + } + Ok(r) + }) + } +} diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs new file mode 100644 index 0000000000000..82af5dbade8f7 --- /dev/null +++ b/frame/support/src/traits/tokens.rs @@ -0,0 +1,28 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits for working with tokens and their associated datastructures. + +pub mod fungible; +pub mod fungibles; +pub mod currency; +pub mod imbalance; +mod misc; +pub use misc::{ + WithdrawConsequence, DepositConsequence, ExistenceRequirement, BalanceStatus, WithdrawReasons, +}; +pub use imbalance::Imbalance; diff --git a/frame/support/src/traits/tokens/currency.rs b/frame/support/src/traits/tokens/currency.rs new file mode 100644 index 0000000000000..567ca44aa78c7 --- /dev/null +++ b/frame/support/src/traits/tokens/currency.rs @@ -0,0 +1,208 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The Currency trait and associated types. + +use sp_std::fmt::Debug; +use sp_runtime::traits::MaybeSerializeDeserialize; +use crate::dispatch::{DispatchResult, DispatchError}; +use super::misc::{Balance, WithdrawReasons, ExistenceRequirement}; +use super::imbalance::{Imbalance, SignedImbalance}; + + +mod reservable; +pub use reservable::ReservableCurrency; +mod lockable; +pub use lockable::{LockableCurrency, VestingSchedule, LockIdentifier}; + +/// Abstraction over a fungible assets system. +pub trait Currency { + /// The balance of an account. + type Balance: Balance + MaybeSerializeDeserialize + Debug; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type PositiveImbalance: Imbalance; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type NegativeImbalance: Imbalance; + + // PUBLIC IMMUTABLES + + /// The combined balance of `who`. + fn total_balance(who: &AccountId) -> Self::Balance; + + /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no + /// balance changes in the meantime and only the reserved balance is not taken into account. + fn can_slash(who: &AccountId, value: Self::Balance) -> bool; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. 
This is equivalent to the `Balances` module's + /// `ExistentialDeposit`. + fn minimum_balance() -> Self::Balance; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn burn(amount: Self::Balance) -> Self::PositiveImbalance; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> Self::NegativeImbalance; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(amount: Self::Balance) -> (Self::PositiveImbalance, Self::NegativeImbalance) { + (Self::burn(amount.clone()), Self::issue(amount)) + } + + /// The 'free' balance of a given account. + /// + /// This is the only balance that matters in terms of most operations on tokens. It alone + /// is used to determine the balance when in the contract execution environment. When this + /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is + /// deleted: specifically `FreeBalance`. + /// + /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn free_balance(who: &AccountId) -> Self::Balance; + + /// Returns `Ok` iff the account is able to make a withdrawal of the given amount + /// for the given reason. 
Basically, it's just a dry-run of `withdraw`. + /// + /// `Err(...)` with the reason why not otherwise. + fn ensure_can_withdraw( + who: &AccountId, + _amount: Self::Balance, + reasons: WithdrawReasons, + new_balance: Self::Balance, + ) -> DispatchResult; + + // PUBLIC MUTABLES (DANGEROUS) + + /// Transfer some liquid free balance to another staker. + /// + /// This is a very high-level function. It will ensure all appropriate fees are paid + /// and no imbalance in the system remains. + fn transfer( + source: &AccountId, + dest: &AccountId, + value: Self::Balance, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult; + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash( + who: &AccountId, + value: Self::Balance + ) -> (Self::NegativeImbalance, Self::Balance); + + /// Mints `value` to the free balance of `who`. + /// + /// If `who` doesn't exist, nothing is done and an Err returned. + fn deposit_into_existing( + who: &AccountId, + value: Self::Balance + ) -> Result; + + /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on + /// success. + fn resolve_into_existing( + who: &AccountId, + value: Self::NegativeImbalance, + ) -> Result<(), Self::NegativeImbalance> { + let v = value.peek(); + match Self::deposit_into_existing(who, v) { + Ok(opposite) => Ok(drop(value.offset(opposite))), + _ => Err(value), + } + } + + /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. + /// + /// Infallible. 
+ fn deposit_creating( + who: &AccountId, + value: Self::Balance, + ) -> Self::PositiveImbalance; + + /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on + /// success. + fn resolve_creating( + who: &AccountId, + value: Self::NegativeImbalance, + ) { + let v = value.peek(); + drop(value.offset(Self::deposit_creating(who, v))); + } + + /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is + /// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining. + /// + /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, + /// then it returns `Err`. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is `value`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> Result; + + /// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success. + fn settle( + who: &AccountId, + value: Self::PositiveImbalance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> Result<(), Self::PositiveImbalance> { + let v = value.peek(); + match Self::withdraw(who, v, reasons, liveness) { + Ok(opposite) => Ok(drop(value.offset(opposite))), + _ => Err(value), + } + } + + /// Ensure an account's free balance equals some value; this will create the account + /// if needed. + /// + /// Returns a signed imbalance and status to indicate if the account was successfully updated or update + /// has led to killing of the account. 
+ fn make_free_balance_be( + who: &AccountId, + balance: Self::Balance, + ) -> SignedImbalance; +} diff --git a/frame/support/src/traits/tokens/currency/lockable.rs b/frame/support/src/traits/tokens/currency/lockable.rs new file mode 100644 index 0000000000000..ed3d1cf46362b --- /dev/null +++ b/frame/support/src/traits/tokens/currency/lockable.rs @@ -0,0 +1,104 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The lockable currency trait and some associated types. + +use crate::dispatch::DispatchResult; +use crate::traits::misc::Get; +use super::Currency; +use super::super::misc::WithdrawReasons; + +/// An identifier for a lock. Used for disambiguating different locks so that +/// they can be individually replaced or removed. +pub type LockIdentifier = [u8; 8]; + +/// A currency whose accounts can have liquidity restrictions. +pub trait LockableCurrency: Currency { + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The maximum number of locks a user should have on their account. + type MaxLocks: Get; + + /// Create a new balance lock on account `who`. + /// + /// If the new lock is valid (i.e. not already expired), it will push the struct to + /// the `Locks` vec in storage. Note that you can lock more funds than a user has. 
+ /// + /// If the lock `id` already exists, this will update it. + fn set_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all + /// parameters or creates a new one if it does not exist. + /// + /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it + /// applies the most severe constraints of the two, while `set_lock` replaces the lock + /// with the new parameters. As in, `extend_lock` will set: + /// - maximum `amount` + /// - bitwise mask of all `reasons` + fn extend_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Remove an existing lock. + fn remove_lock( + id: LockIdentifier, + who: &AccountId, + ); +} + +/// A vesting schedule over a currency. This allows a particular currency to have vesting limits +/// applied to it. +pub trait VestingSchedule { + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The currency that this schedule applies to. + type Currency: Currency; + + /// Get the amount that is currently being vested and cannot be transferred out of this account. + /// Returns `None` if the account has no vesting schedule. + fn vesting_balance(who: &AccountId) -> Option<>::Balance>; + + /// Adds a vesting schedule to a given account. + /// + /// If there already exists a vesting schedule for the given account, an `Err` is returned + /// and nothing is updated. + /// + /// Is a no-op if the amount to be vested is zero. + /// + /// NOTE: This doesn't alter the free balance of the account. + fn add_vesting_schedule( + who: &AccountId, + locked: >::Balance, + per_block: >::Balance, + starting_block: Self::Moment, + ) -> DispatchResult; + + /// Remove a vesting schedule for a given account. + /// + /// NOTE: This doesn't alter the free balance of the account. 
+ fn remove_vesting_schedule(who: &AccountId); +} diff --git a/frame/support/src/traits/tokens/currency/reservable.rs b/frame/support/src/traits/tokens/currency/reservable.rs new file mode 100644 index 0000000000000..14ea1d3a16fb6 --- /dev/null +++ b/frame/support/src/traits/tokens/currency/reservable.rs @@ -0,0 +1,83 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The reservable currency trait. + +use super::Currency; +use super::super::misc::BalanceStatus; +use crate::dispatch::{DispatchResult, DispatchError}; + +/// A currency where funds can be reserved from the user. +pub trait ReservableCurrency: Currency { + /// Same result as `reserve(who, value)` (but without the side-effects) assuming there + /// are no balance changes in the meantime. + fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; + + /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. + /// + /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` + /// is less than `value`, then a non-zero second item will be returned. 
+ fn slash_reserved( + who: &AccountId, + value: Self::Balance + ) -> (Self::NegativeImbalance, Self::Balance); + + /// The amount of the balance of a given account that is externally reserved; this can still get + /// slashed, but gets slashed last of all. + /// + /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens + /// that are still 'owned' by the account holder, but which are suspendable. + /// + /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' + /// is deleted: specifically, `ReservedBalance`. + /// + /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn reserved_balance(who: &AccountId) -> Self::Balance; + + /// Moves `value` from balance to reserved balance. + /// + /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will + /// be returned to notify of this. This is different behavior than `unreserve`. + fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; + + /// Moves up to `value` from reserved balance to free balance. This function cannot fail. + /// + /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` + /// is less than `value`, then the remaining amount will be returned. + /// + /// # NOTES + /// + /// - This is different from `reserve`. + /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will + /// invoke `on_reserved_too_low` and could reap the account. + fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; + + /// Moves up to `value` from reserved balance of account `slashed` to balance of account + /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be + /// returned. 
Funds will be placed in either the `free` balance or the `reserved` balance, + /// depending on the `status`. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then `Ok(non_zero)` will be returned. + fn repatriate_reserved( + slashed: &AccountId, + beneficiary: &AccountId, + value: Self::Balance, + status: BalanceStatus, + ) -> Result; +} diff --git a/frame/support/src/traits/tokens/fungible.rs b/frame/support/src/traits/tokens/fungible.rs new file mode 100644 index 0000000000000..5472212aaa65e --- /dev/null +++ b/frame/support/src/traits/tokens/fungible.rs @@ -0,0 +1,310 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for dealing with a single fungible token class and any associated types. + +use super::*; +use sp_runtime::traits::Saturating; +use crate::traits::misc::Get; +use crate::dispatch::{DispatchResult, DispatchError}; +use super::misc::{DepositConsequence, WithdrawConsequence, Balance}; + +mod balanced; +mod imbalance; +pub use balanced::{Balanced, Unbalanced}; +pub use imbalance::{Imbalance, HandleImbalanceDrop, DebtOf, CreditOf}; + +/// Trait for providing balance-inspection access to a fungible asset. +pub trait Inspect { + /// Scalar type for representing balance of an account. 
+ type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. + fn minimum_balance() -> Self::Balance; + + /// Get the balance of `who`. + fn balance(who: &AccountId) -> Self::Balance; + + /// Get the maximum amount that `who` can withdraw/transfer successfully. + fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance; + + /// Returns `true` if the balance of `who` may be increased by `amount`. + fn can_deposit(who: &AccountId, amount: Self::Balance) -> DepositConsequence; + + /// Returns `Failed` if the balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence; +} + +/// Trait for providing an ERC-20 style fungible asset. +pub trait Mutate: Inspect { + /// Increase the balance of `who` by exactly `amount`, minting new tokens. If that isn't + /// possible then an `Err` is returned and nothing is changed. + fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Decrease the balance of `who` by at least `amount`, possibly slightly more in the case of + /// minimum_balance requirements, burning the tokens. If that isn't possible then an `Err` is + /// returned and nothing is changed. If successful, the amount of tokens reduced is returned. + fn burn_from(who: &AccountId, amount: Self::Balance) -> Result; + + /// Attempt to reduce the balance of `who` by as much as possible up to `amount`, and possibly + /// slightly more due to minimum_balance requirements. If no decrease is possible then an `Err` + /// is returned and nothing is changed. If successful, the amount of tokens reduced is returned. + /// + /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure + /// that is doesn't fail. 
+ fn slash(who: &AccountId, amount: Self::Balance) -> Result { + Self::burn_from(who, Self::reducible_balance(who, false).min(amount)) + } + + /// Transfer funds from one account into another. The default implementation uses `mint_into` + /// and `burn_from` and may generate unwanted events. + fn teleport( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> Result { + let extra = Self::can_withdraw(&source, amount).into_result()?; + Self::can_deposit(&dest, amount.saturating_add(extra)).into_result()?; + let actual = Self::burn_from(source, amount)?; + debug_assert!(actual == amount.saturating_add(extra), "can_withdraw must agree with withdraw; qed"); + match Self::mint_into(dest, actual) { + Ok(_) => Ok(actual), + Err(err) => { + debug_assert!(false, "can_deposit returned true previously; qed"); + // attempt to return the funds back to source + let revert = Self::mint_into(source, actual); + debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); + Err(err) + } + } + } +} + +/// Trait for providing a fungible asset which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer funds from one account into another. + fn transfer( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result; +} + +/// Trait for inspecting a fungible asset which can be reserved. +pub trait InspectHold: Inspect { + /// Amount of funds held in reserve by `who`. + fn balance_on_hold(who: &AccountId) -> Self::Balance; + + /// Check to see if some `amount` of funds of `who` may be placed on hold. + fn can_hold(who: &AccountId, amount: Self::Balance) -> bool; +} + +/// Trait for mutating a fungible asset which can be reserved. +pub trait MutateHold: InspectHold + Transfer { + /// Hold some funds in an account. + fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Release up to `amount` held funds in an account. + /// + /// The actual amount released is returned with `Ok`. 
+ /// + /// If `best_effort` is `true`, then the amount actually unreserved and returned as the inner + /// value of `Ok` may be smaller than the `amount` passed. + fn release(who: &AccountId, amount: Self::Balance, best_effort: bool) + -> Result; + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without + /// error. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_held( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_held: bool, + ) -> Result; +} + +/// Trait for slashing a fungible asset which can be reserved. +pub trait BalancedHold: Balanced + MutateHold { + /// Reduce the balance of some funds on hold in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds that are on hold up to `amount` will be deducted as possible. If this is less + /// than `amount`, then a non-zero second item will be returned. + fn slash_held(who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance); +} + +impl< + AccountId, + T: Balanced + MutateHold, +> BalancedHold for T { + fn slash_held(who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance) + { + let actual = match Self::release(who, amount, true) { + Ok(x) => x, + Err(_) => return (Imbalance::default(), amount), + }; + >::slash(who, actual) + } +} + +/// Convert a `fungibles` trait implementation into a `fungible` trait implementation by identifying +/// a single item. 
+pub struct ItemOf< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, +>( + sp_std::marker::PhantomData<(F, A, AccountId)> +); + +impl< + F: fungibles::Inspect, + A: Get<>::AssetId>, + AccountId, +> Inspect for ItemOf { + type Balance = >::Balance; + fn total_issuance() -> Self::Balance { + >::total_issuance(A::get()) + } + fn minimum_balance() -> Self::Balance { + >::minimum_balance(A::get()) + } + fn balance(who: &AccountId) -> Self::Balance { + >::balance(A::get(), who) + } + fn reducible_balance(who: &AccountId, keep_alive: bool) -> Self::Balance { + >::reducible_balance(A::get(), who, keep_alive) + } + fn can_deposit(who: &AccountId, amount: Self::Balance) -> DepositConsequence { + >::can_deposit(A::get(), who, amount) + } + fn can_withdraw(who: &AccountId, amount: Self::Balance) -> WithdrawConsequence { + >::can_withdraw(A::get(), who, amount) + } +} + +impl< + F: fungibles::Mutate, + A: Get<>::AssetId>, + AccountId, +> Mutate for ItemOf { + fn mint_into(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::mint_into(A::get(), who, amount) + } + fn burn_from(who: &AccountId, amount: Self::Balance) -> Result { + >::burn_from(A::get(), who, amount) + } +} + +impl< + F: fungibles::Transfer, + A: Get<>::AssetId>, + AccountId, +> Transfer for ItemOf { + fn transfer(source: &AccountId, dest: &AccountId, amount: Self::Balance, keep_alive: bool) + -> Result + { + >::transfer(A::get(), source, dest, amount, keep_alive) + } +} + +impl< + F: fungibles::InspectHold, + A: Get<>::AssetId>, + AccountId, +> InspectHold for ItemOf { + fn balance_on_hold(who: &AccountId) -> Self::Balance { + >::balance_on_hold(A::get(), who) + } + fn can_hold(who: &AccountId, amount: Self::Balance) -> bool { + >::can_hold(A::get(), who, amount) + } +} + +impl< + F: fungibles::MutateHold, + A: Get<>::AssetId>, + AccountId, +> MutateHold for ItemOf { + fn hold(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::hold(A::get(), who, amount) + } + fn release(who: 
&AccountId, amount: Self::Balance, best_effort: bool) + -> Result + { + >::release(A::get(), who, amount, best_effort) + } + fn transfer_held( + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result { + >::transfer_held( + A::get(), + source, + dest, + amount, + best_effort, + on_hold, + ) + } +} + +impl< + F: fungibles::Unbalanced, + A: Get<>::AssetId>, + AccountId, +> Unbalanced for ItemOf { + fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult { + >::set_balance(A::get(), who, amount) + } + fn set_total_issuance(amount: Self::Balance) -> () { + >::set_total_issuance(A::get(), amount) + } + fn decrease_balance(who: &AccountId, amount: Self::Balance) -> Result { + >::decrease_balance(A::get(), who, amount) + } + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::decrease_balance_at_most(A::get(), who, amount) + } + fn increase_balance(who: &AccountId, amount: Self::Balance) -> Result { + >::increase_balance(A::get(), who, amount) + } + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) -> Self::Balance { + >::increase_balance_at_most(A::get(), who, amount) + } +} diff --git a/frame/support/src/traits/tokens/fungible/balanced.rs b/frame/support/src/traits/tokens/fungible/balanced.rs new file mode 100644 index 0000000000000..19bdb4f245ee8 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/balanced.rs @@ -0,0 +1,360 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The trait and associated types for sets of fungible tokens that manage total issuance without +//! requiring atomic balanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::{TokenError, traits::{CheckedAdd, Zero}}; +use super::super::Imbalance as ImbalanceT; +use crate::traits::misc::{SameOrOther, TryDrop}; +use crate::dispatch::{DispatchResult, DispatchError}; + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. +/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(amount: Self::Balance) -> DebtOf; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. 
+ /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> CreditOf; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(amount: Self::Balance) -> (DebtOf, CreditOf) { + (Self::rescind(amount), Self::issue(amount)) + } + + /// Deducts up to `value` from the combined balance of `who`. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash( + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); + + /// Mints exactly `value` into the account of `who`. + /// + /// If `who` doesn't exist, nothing is done and an `Err` returned. This could happen because it + /// the account doesn't yet exist and it isn't possible to create it under the current + /// circumstances and with `value` in it. + fn deposit( + who: &AccountId, + value: Self::Balance, + ) -> Result, DispatchError>; + + /// Removes `value` balance from `who` account if possible. + /// + /// If the removal is not possible, then it returns `Err` and nothing is changed. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is no less than `value`. It may be more in the case that removing it reduced it below + /// `Self::minimum_balance()`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError>; + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. 
+ /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: CreditOf, + ) -> Result<(), CreditOf> { + let v = credit.peek(); + let debt = match Self::deposit(who, v) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + let result = credit.offset(debt).try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: DebtOf, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DebtOf> { + let amount = debt.peek(); + let credit = match Self::withdraw(who, amount) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + SameOrOther::None => Ok(CreditOf::::zero()), + SameOrOther::Same(dust) => Ok(dust), + SameOrOther::Other(rest) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + } + } + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Set the balance of `who` to `amount`. If this cannot be done for some reason (e.g. + /// because the account cannot be created or an overflow) then an `Err` is returned. 
+ fn set_balance(who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Set the total issuance to `amount`. + fn set_total_issuance(amount: Self::Balance); + + /// Reduce the balance of `who` by `amount`. If it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + fn decrease_balance(who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(who); + let (mut new_balance, mut amount) = if old_balance < amount { + Err(TokenError::NoFunds)? + } else { + (old_balance - amount, amount) + }; + if new_balance < Self::minimum_balance() { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + // Defensive only - this should not fail now. + Self::set_balance(who, new_balance)?; + Ok(amount) + } + + /// Reduce the balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + /// + /// Return the imbalance by which the account was reduced. + fn decrease_balance_at_most(who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(who); + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) + } else { + (old_balance - amount, amount) + }; + let minimum_balance = Self::minimum_balance(); + if new_balance < minimum_balance { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + let mut r = Self::set_balance(who, new_balance); + if r.is_err() { + // Some error, probably because we tried to destroy an account which cannot be destroyed. 
+ if new_balance.is_zero() && amount >= minimum_balance { + new_balance = minimum_balance; + amount -= minimum_balance; + r = Self::set_balance(who, new_balance); + } + if r.is_err() { + // Still an error. Apparently it's not possible to reduce at all. + amount = Zero::zero(); + } + } + amount + } + + /// Increase the balance of `who` by `amount`. If it cannot be increased by that amount + /// for some reason, return `Err` and don't increase it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance(who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(who); + let new_balance = old_balance.checked_add(&amount).ok_or(TokenError::Overflow)?; + if new_balance < Self::minimum_balance() { + Err(TokenError::BelowMinimum)? + } + if old_balance != new_balance { + Self::set_balance(who, new_balance)?; + } + Ok(amount) + } + + /// Increase the balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance will be zero in the case that + /// `amount < Self::minimum_balance()`. + /// + /// Return the imbalance by which the account was increased. + fn increase_balance_at_most(who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(who); + let mut new_balance = old_balance.saturating_add(amount); + let mut amount = new_balance - old_balance; + if new_balance < Self::minimum_balance() { + new_balance = Zero::zero(); + amount = Zero::zero(); + } + if old_balance == new_balance || Self::set_balance(who, new_balance).is_ok() { + amount + } else { + Zero::zero() + } + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. 
+pub struct IncreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for IncreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_add(amount)) + } +} + +/// Simple handler for an imbalance drop which decreases the total issuance of the system by the +/// imbalance amount. Used for leftover credit. +pub struct DecreaseIssuance(PhantomData<(AccountId, U)>); +impl> HandleImbalanceDrop + for DecreaseIssuance +{ + fn handle(amount: U::Balance) { + U::set_total_issuance(U::total_issuance().saturating_sub(amount)) + } +} + +/// An imbalance type which uses `DecreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that funds in someone's account have been removed and not yet placed anywhere +/// else. If it gets dropped, then those funds will be assumed to be "burned" and the total supply +/// will be accordingly decreased to ensure it equals the sum of the balances of all accounts. +type Credit = Imbalance< + >::Balance, + DecreaseIssuance, + IncreaseIssuance, +>; + +/// An imbalance type which uses `IncreaseIssuance` to deal with anything `Drop`ed. +/// +/// Basically means that there are funds in someone's account whose origin is as yet unaccounted +/// for. If it gets dropped, then those funds will be assumed to be "minted" and the total supply +/// will be accordingly increased to ensure it equals the sum of the balances of all accounts. +type Debt = Imbalance< + >::Balance, + IncreaseIssuance, + DecreaseIssuance, +>; + +/// Create some `Credit` item. Only for internal use. +fn credit>( + amount: U::Balance, +) -> Credit { + Imbalance::new(amount) +} + +/// Create some `Debt` item. Only for internal use. 
+fn debt>( + amount: U::Balance, +) -> Debt { + Imbalance::new(amount) +} + +impl> Balanced for U { + type OnDropCredit = DecreaseIssuance; + type OnDropDebt = IncreaseIssuance; + fn rescind(amount: Self::Balance) -> Debt { + let old = U::total_issuance(); + let new = old.saturating_sub(amount); + U::set_total_issuance(new); + debt(old - new) + } + fn issue(amount: Self::Balance) -> Credit { + let old = U::total_issuance(); + let new = old.saturating_add(amount); + U::set_total_issuance(new); + credit(new - old) + } + fn slash( + who: &AccountId, + amount: Self::Balance, + ) -> (Credit, Self::Balance) { + let slashed = U::decrease_balance_at_most(who, amount); + // `slashed` could be less than, greater than or equal to `amount`. + // If slashed == amount, it means the account had at least amount in it and it could all be + // removed without a problem. + // If slashed > amount, it means the account had more than amount in it, but not enough more + // to push it over minimum_balance. + // If slashed < amount, it means the account didn't have enough in it to be reduced by + // `amount` without being destroyed. + (credit(slashed), amount.saturating_sub(slashed)) + } + fn deposit( + who: &AccountId, + amount: Self::Balance + ) -> Result, DispatchError> { + let increase = U::increase_balance(who, amount)?; + Ok(debt(increase)) + } + fn withdraw( + who: &AccountId, + amount: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError> { + let decrease = U::decrease_balance(who, amount)?; + Ok(credit(decrease)) + } +} diff --git a/frame/support/src/traits/tokens/fungible/imbalance.rs b/frame/support/src/traits/tokens/fungible/imbalance.rs new file mode 100644 index 0000000000000..c084fa97fbec0 --- /dev/null +++ b/frame/support/src/traits/tokens/fungible/imbalance.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The imbalance type and its associates, which handles keeps everything adding up properly with +//! unbalanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::traits::Zero; +use super::misc::Balance; +use super::balanced::Balanced; +use crate::traits::misc::{TryDrop, SameOrOther}; +use super::super::Imbalance as ImbalanceT; + +/// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or +/// debt (positive) imbalance. +pub trait HandleImbalanceDrop { + /// Some something with the imbalance's value which is being dropped. + fn handle(amount: Balance); +} + +/// An imbalance in the system, representing a divergence of recorded token supply from the sum of +/// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing +/// into an account, settling from an account or altering the supply). +/// +/// Importantly, it has a special `Drop` impl, and cannot be created outside of this module. 
+#[must_use] +pub struct Imbalance< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> { + amount: B, + _phantom: PhantomData<(OnDrop, OppositeOnDrop)>, +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop +> Drop for Imbalance { + fn drop(&mut self) { + if !self.amount.is_zero() { + OnDrop::handle(self.amount) + } + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> TryDrop for Imbalance { + /// Drop an instance cleanly. Only works if its value represents "no-operation". + fn try_drop(self) -> Result<(), Self> { + self.drop_zero() + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> Default for Imbalance { + fn default() -> Self { + Self::zero() + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> Imbalance { + pub(crate) fn new(amount: B) -> Self { + Self { amount, _phantom: PhantomData } + } +} + +impl< + B: Balance, + OnDrop: HandleImbalanceDrop, + OppositeOnDrop: HandleImbalanceDrop, +> ImbalanceT for Imbalance { + type Opposite = Imbalance; + + fn zero() -> Self { + Self { amount: Zero::zero(), _phantom: PhantomData } + } + + fn drop_zero(self) -> Result<(), Self> { + if self.amount.is_zero() { + sp_std::mem::forget(self); + Ok(()) + } else { + Err(self) + } + } + + fn split(self, amount: B) -> (Self, Self) { + let first = self.amount.min(amount); + let second = self.amount - first; + sp_std::mem::forget(self); + (Imbalance::new(first), Imbalance::new(second)) + } + fn merge(mut self, other: Self) -> Self { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + self + } + fn subsume(&mut self, other: Self) { + self.amount = self.amount.saturating_add(other.amount); + sp_std::mem::forget(other); + } + fn offset(self, other: Imbalance) + -> SameOrOther> + { + let (a, b) = (self.amount, other.amount); + 
sp_std::mem::forget((self, other)); + + if a == b { + SameOrOther::None + } else if a > b { + SameOrOther::Same(Imbalance::new(a - b)) + } else { + SameOrOther::Other(Imbalance::::new(b - a)) + } + } + fn peek(&self) -> B { + self.amount + } +} + +/// Imbalance implying that the total_issuance value is less than the sum of all account balances. +pub type DebtOf = Imbalance< + >::Balance, + // This will generally be implemented by increasing the total_issuance value. + >::OnDropDebt, + >::OnDropCredit, +>; + +/// Imbalance implying that the total_issuance value is greater than the sum of all account balances. +pub type CreditOf = Imbalance< + >::Balance, + // This will generally be implemented by decreasing the total_issuance value. + >::OnDropCredit, + >::OnDropDebt, +>; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs new file mode 100644 index 0000000000000..490f28dfb453a --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -0,0 +1,210 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The traits for sets of fungible tokens and any associated types. 
+ +use super::*; +use crate::dispatch::{DispatchError, DispatchResult}; +use super::misc::{AssetId, Balance}; +use sp_runtime::traits::Saturating; + +mod balanced; +pub use balanced::{Balanced, Unbalanced}; +mod imbalance; +pub use imbalance::{Imbalance, HandleImbalanceDrop, DebtOf, CreditOf}; + +/// Trait for providing balance-inspection access to a set of named fungible assets. +pub trait Inspect { + /// Means of identifying one asset class from another. + type AssetId: AssetId; + + /// Scalar type for representing balance of an account. + type Balance: Balance; + + /// The total amount of issuance in the system. + fn total_issuance(asset: Self::AssetId) -> Self::Balance; + + /// The minimum balance any single account may have. + fn minimum_balance(asset: Self::AssetId) -> Self::Balance; + + /// Get the `asset` balance of `who`. + fn balance(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Get the maximum amount of `asset` that `who` can withdraw/transfer successfully. + fn reducible_balance(asset: Self::AssetId, who: &AccountId, keep_alive: bool) -> Self::Balance; + + /// Returns `true` if the `asset` balance of `who` may be increased by `amount`. + fn can_deposit(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> DepositConsequence; + + /// Returns `Failed` if the `asset` balance of `who` may not be decreased by `amount`, otherwise + /// the consequence. + fn can_withdraw( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence; +} + +/// Trait for providing a set of named fungible assets which can be created and destroyed. +pub trait Mutate: Inspect { + /// Attempt to increase the `asset` balance of `who` by `amount`. + /// + /// If not possible then don't do anything. Possible reasons for failure include: + /// - Minimum balance not met. + /// - Account cannot be created (e.g. because there is no provider reference and/or the asset + /// isn't considered worth anything). 
+ /// + /// Since this is an operation which should be possible to take alone, if successful it will + /// increase the overall supply of the underlying token. + fn mint_into(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Attempt to reduce the `asset` balance of `who` by `amount`. + /// + /// If not possible then don't do anything. Possible reasons for failure include: + /// - Less funds in the account than `amount` + /// - Liquidity requirements (locks, reservations) prevent the funds from being removed + /// - Operation would require destroying the account and it is required to stay alive (e.g. + /// because it's providing a needed provider reference). + /// + /// Since this is an operation which should be possible to take alone, if successful it will + /// reduce the overall supply of the underlying token. + /// + /// Due to minimum balance requirements, it's possible that the amount withdrawn could be up to + /// `Self::minimum_balance() - 1` more than the `amount`. The total amount withdrawn is returned + /// in an `Ok` result. This may be safely ignored if you don't mind the overall supply reducing. + fn burn_from(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result; + + /// Attempt to reduce the `asset` balance of `who` by as much as possible up to `amount`, and + /// possibly slightly more due to minimum_balance requirements. If no decrease is possible then + /// an `Err` is returned and nothing is changed. If successful, the amount of tokens reduced is + /// returned. + /// + /// The default implementation just uses `withdraw` along with `reducible_balance` to ensure + /// that is doesn't fail. + fn slash(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result + { + Self::burn_from(asset, who, Self::reducible_balance(asset, who, false).min(amount)) + } + + /// Transfer funds from one account into another. 
The default implementation uses `mint_into` + /// and `burn_from` and may generate unwanted events. + fn teleport( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> Result { + let extra = Self::can_withdraw(asset, &source, amount).into_result()?; + Self::can_deposit(asset, &dest, amount.saturating_add(extra)).into_result()?; + let actual = Self::burn_from(asset, source, amount)?; + debug_assert!(actual == amount.saturating_add(extra), "can_withdraw must agree with withdraw; qed"); + match Self::mint_into(asset, dest, actual) { + Ok(_) => Ok(actual), + Err(err) => { + debug_assert!(false, "can_deposit returned true previously; qed"); + // attempt to return the funds back to source + let revert = Self::mint_into(asset, source, actual); + debug_assert!(revert.is_ok(), "withdrew funds previously; qed"); + Err(err) + } + } + } +} + +/// Trait for providing a set of named fungible assets which can only be transferred. +pub trait Transfer: Inspect { + /// Transfer funds from one account into another. + fn transfer( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + keep_alive: bool, + ) -> Result; +} + +/// Trait for inspecting a set of named fungible assets which can be placed on hold. +pub trait InspectHold: Inspect { + /// Amount of funds held in hold. + fn balance_on_hold(asset: Self::AssetId, who: &AccountId) -> Self::Balance; + + /// Check to see if some `amount` of `asset` may be held on the account of `who`. + fn can_hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> bool; +} + +/// Trait for mutating a set of named fungible assets which can be placed on hold. +pub trait MutateHold: InspectHold + Transfer { + /// Hold some funds in an account. + fn hold(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Release some funds in an account from being on hold. 
+ /// + /// If `best_effort` is `true`, then the amount actually released and returned as the inner + /// value of `Ok` may be smaller than the `amount` passed. + fn release(asset: Self::AssetId, who: &AccountId, amount: Self::Balance, best_effort: bool) + -> Result; + + /// Transfer held funds into a destination account. + /// + /// If `on_hold` is `true`, then the destination account must already exist and the assets + /// transferred will still be on hold in the destination account. If not, then the destination + /// account need not already exist, but must be creatable. + /// + /// If `best_effort` is `true`, then an amount less than `amount` may be transferred without + /// error. + /// + /// The actual amount transferred is returned, or `Err` in the case of error and nothing is + /// changed. + fn transfer_held( + asset: Self::AssetId, + source: &AccountId, + dest: &AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result; +} + +/// Trait for mutating one of several types of fungible assets which can be held. +pub trait BalancedHold: Balanced + MutateHold { + /// Release and slash some funds in an account. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `amount` will be deducted as possible. If this is less than `amount`, + /// then a non-zero second item will be returned. 
+ fn slash_held(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance); +} + +impl< + AccountId, + T: Balanced + MutateHold, +> BalancedHold for T { + fn slash_held(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> (CreditOf, Self::Balance) + { + let actual = match Self::release(asset, who, amount, true) { + Ok(x) => x, + Err(_) => return (Imbalance::zero(asset), amount), + }; + >::slash(asset, who, actual) + } +} diff --git a/frame/support/src/traits/tokens/fungibles/balanced.rs b/frame/support/src/traits/tokens/fungibles/balanced.rs new file mode 100644 index 0000000000000..efb21300bcaa8 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/balanced.rs @@ -0,0 +1,378 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The trait and associated types for sets of fungible tokens that manage total issuance without +//! requiring atomic balanced operations. + +use super::*; +use sp_std::marker::PhantomData; +use sp_runtime::{TokenError, traits::{Zero, CheckedAdd}}; +use sp_arithmetic::traits::Saturating; +use crate::dispatch::{DispatchError, DispatchResult}; +use crate::traits::misc::{SameOrOther, TryDrop}; + +/// A fungible token class where any creation and deletion of tokens is semi-explicit and where the +/// total supply is maintained automatically. 
+/// +/// This is auto-implemented when a token class has `Unbalanced` implemented. +pub trait Balanced: Inspect { + /// The type for managing what happens when an instance of `Debt` is dropped without being used. + type OnDropDebt: HandleImbalanceDrop; + /// The type for managing what happens when an instance of `Credit` is dropped without being + /// used. + type OnDropCredit: HandleImbalanceDrop; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn rescind(asset: Self::AssetId, amount: Self::Balance) -> DebtOf; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(asset: Self::AssetId, amount: Self::Balance) -> CreditOf; + + /// Produce a pair of imbalances that cancel each other out exactly. + /// + /// This is just the same as burning and issuing the same amount and has no effect on the + /// total issuance. + fn pair(asset: Self::AssetId, amount: Self::Balance) + -> (DebtOf, CreditOf) + { + (Self::rescind(asset, amount), Self::issue(asset, amount)) + } + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. 
+ fn slash( + asset: Self::AssetId, + who: &AccountId, + amount: Self::Balance, + ) -> (CreditOf, Self::Balance); + + /// Mints exactly `value` into the `asset` account of `who`. + /// + /// If `who` doesn't exist, nothing is done and an `Err` returned. This could happen because it + /// the account doesn't yet exist and it isn't possible to create it under the current + /// circumstances and with `value` in it. + fn deposit( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + ) -> Result, DispatchError>; + + /// Removes `value` free `asset` balance from `who` account if possible. + /// + /// If the removal is not possible, then it returns `Err` and nothing is changed. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is no less than `value`. It may be more in the case that removing it reduced it below + /// `Self::minimum_balance()`. + fn withdraw( + asset: Self::AssetId, + who: &AccountId, + value: Self::Balance, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DispatchError>; + + /// The balance of `who` is increased in order to counter `credit`. If the whole of `credit` + /// cannot be countered, then nothing is changed and the original `credit` is returned in an + /// `Err`. + /// + /// Please note: If `credit.peek()` is less than `Self::minimum_balance()`, then `who` must + /// already exist for this to succeed. + fn resolve( + who: &AccountId, + credit: CreditOf, + ) -> Result<(), CreditOf> { + let v = credit.peek(); + let debt = match Self::deposit(credit.asset(), who, v) { + Err(_) => return Err(credit), + Ok(d) => d, + }; + if let Ok(result) = credit.offset(debt) { + let result = result.try_drop(); + debug_assert!(result.is_ok(), "ok deposit return must be equal to credit value; qed"); + } else { + debug_assert!(false, "debt.asset is credit.asset; qed"); + } + Ok(()) + } + + /// The balance of `who` is decreased in order to counter `debt`. 
If the whole of `debt` + /// cannot be countered, then nothing is changed and the original `debt` is returned in an + /// `Err`. + fn settle( + who: &AccountId, + debt: DebtOf, + //TODO: liveness: ExistenceRequirement, + ) -> Result, DebtOf> { + let amount = debt.peek(); + let asset = debt.asset(); + let credit = match Self::withdraw(asset, who, amount) { + Err(_) => return Err(debt), + Ok(d) => d, + }; + match credit.offset(debt) { + Ok(SameOrOther::None) => Ok(CreditOf::::zero(asset)), + Ok(SameOrOther::Same(dust)) => Ok(dust), + Ok(SameOrOther::Other(rest)) => { + debug_assert!(false, "ok withdraw return must be at least debt value; qed"); + Err(rest) + } + Err(_) => { + debug_assert!(false, "debt.asset is credit.asset; qed"); + Ok(CreditOf::::zero(asset)) + } + } + } +} + +/// A fungible token class where the balance can be set arbitrarily. +/// +/// **WARNING** +/// Do not use this directly unless you want trouble, since it allows you to alter account balances +/// without keeping the issuance up to date. It has no safeguards against accidentally creating +/// token imbalances in your system leading to accidental imflation or deflation. It's really just +/// for the underlying datatype to implement so the user gets the much safer `Balanced` trait to +/// use. +pub trait Unbalanced: Inspect { + /// Set the `asset` balance of `who` to `amount`. If this cannot be done for some reason (e.g. + /// because the account cannot be created or an overflow) then an `Err` is returned. + fn set_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) -> DispatchResult; + + /// Set the total issuance of `asset` to `amount`. + fn set_total_issuance(asset: Self::AssetId, amount: Self::Balance); + + /// Reduce the `asset` balance of `who` by `amount`. If it cannot be reduced by that amount for + /// some reason, return `Err` and don't reduce it at all. If Ok, return the imbalance. 
+ /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + fn decrease_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(asset, who); + let (mut new_balance, mut amount) = if old_balance < amount { + Err(TokenError::NoFunds)? + } else { + (old_balance - amount, amount) + }; + if new_balance < Self::minimum_balance(asset) { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + // Defensive only - this should not fail now. + Self::set_balance(asset, who, new_balance)?; + Ok(amount) + } + + /// Reduce the `asset` balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance may be up to + /// `Self::minimum_balance() - 1` greater than `amount`. + /// + /// Return the imbalance by which the account was reduced. + fn decrease_balance_at_most(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(asset, who); + let (mut new_balance, mut amount) = if old_balance < amount { + (Zero::zero(), old_balance) + } else { + (old_balance - amount, amount) + }; + let minimum_balance = Self::minimum_balance(asset); + if new_balance < minimum_balance { + amount = amount.saturating_add(new_balance); + new_balance = Zero::zero(); + } + let mut r = Self::set_balance(asset, who, new_balance); + if r.is_err() { + // Some error, probably because we tried to destroy an account which cannot be destroyed. + if new_balance.is_zero() && amount >= minimum_balance { + new_balance = minimum_balance; + amount -= minimum_balance; + r = Self::set_balance(asset, who, new_balance); + } + if r.is_err() { + // Still an error. Apparently it's not possible to reduce at all. + amount = Zero::zero(); + } + } + amount + } + + /// Increase the `asset` balance of `who` by `amount`. 
If it cannot be increased by that amount + /// for some reason, return `Err` and don't increase it at all. If Ok, return the imbalance. + /// + /// Minimum balance will be respected and an error will be returned if + /// `amount < Self::minimum_balance()` when the account of `who` is zero. + fn increase_balance(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Result + { + let old_balance = Self::balance(asset, who); + let new_balance = old_balance.checked_add(&amount).ok_or(TokenError::Overflow)?; + if new_balance < Self::minimum_balance(asset) { + Err(TokenError::BelowMinimum)? + } + if old_balance != new_balance { + Self::set_balance(asset, who, new_balance)?; + } + Ok(amount) + } + + /// Increase the `asset` balance of `who` by the most that is possible, up to `amount`. + /// + /// Minimum balance will be respected and the returned imbalance will be zero in the case that + /// `amount < Self::minimum_balance()`. + /// + /// Return the imbalance by which the account was increased. + fn increase_balance_at_most(asset: Self::AssetId, who: &AccountId, amount: Self::Balance) + -> Self::Balance + { + let old_balance = Self::balance(asset, who); + let mut new_balance = old_balance.saturating_add(amount); + let mut amount = new_balance - old_balance; + if new_balance < Self::minimum_balance(asset) { + new_balance = Zero::zero(); + amount = Zero::zero(); + } + if old_balance == new_balance || Self::set_balance(asset, who, new_balance).is_ok() { + amount + } else { + Zero::zero() + } + } +} + +/// Simple handler for an imbalance drop which increases the total issuance of the system by the +/// imbalance amount. Used for leftover debt. 
+pub struct IncreaseIssuance<AccountId, U>(PhantomData<(AccountId, U)>);
+impl<AccountId, U: Unbalanced<AccountId>> HandleImbalanceDrop<U::AssetId, U::Balance>
+	for IncreaseIssuance<AccountId, U>
+{
+	fn handle(asset: U::AssetId, amount: U::Balance) {
+		U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount))
+	}
+}
+
+/// Simple handler for an imbalance drop which decreases the total issuance of the system by the
+/// imbalance amount. Used for leftover credit.
+pub struct DecreaseIssuance<AccountId, U>(PhantomData<(AccountId, U)>);
+impl<AccountId, U: Unbalanced<AccountId>> HandleImbalanceDrop<U::AssetId, U::Balance>
+	for DecreaseIssuance<AccountId, U>
+{
+	fn handle(asset: U::AssetId, amount: U::Balance) {
+		U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount))
+	}
+}
+
+/// An imbalance type which uses `DecreaseIssuance` to deal with anything `Drop`ed.
+///
+/// Basically means that funds in someone's account have been removed and not yet placed anywhere
+/// else. If it gets dropped, then those funds will be assumed to be "burned" and the total supply
+/// will be accordingly decreased to ensure it equals the sum of the balances of all accounts.
+type Credit<AccountId, U> = Imbalance<
+	<U as Inspect<AccountId>>::AssetId,
+	<U as Inspect<AccountId>>::Balance,
+	DecreaseIssuance<AccountId, U>,
+	IncreaseIssuance<AccountId, U>,
+>;
+
+/// An imbalance type which uses `IncreaseIssuance` to deal with anything `Drop`ed.
+///
+/// Basically means that there are funds in someone's account whose origin is as yet unaccounted
+/// for. If it gets dropped, then those funds will be assumed to be "minted" and the total supply
+/// will be accordingly increased to ensure it equals the sum of the balances of all accounts.
+type Debt<AccountId, U> = Imbalance<
+	<U as Inspect<AccountId>>::AssetId,
+	<U as Inspect<AccountId>>::Balance,
+	IncreaseIssuance<AccountId, U>,
+	DecreaseIssuance<AccountId, U>,
+>;
+
+/// Create some `Credit` item. Only for internal use.
+fn credit<AccountId, U: Unbalanced<AccountId>>(
+	asset: U::AssetId,
+	amount: U::Balance,
+) -> Credit<AccountId, U> {
+	Imbalance::new(asset, amount)
+}
+
+/// Create some `Debt` item. Only for internal use.
+fn debt<AccountId, U: Unbalanced<AccountId>>(
+	asset: U::AssetId,
+	amount: U::Balance,
+) -> Debt<AccountId, U> {
+	Imbalance::new(asset, amount)
+}
+
+impl<AccountId, U: Unbalanced<AccountId>> Balanced<AccountId> for U {
+	type OnDropCredit = DecreaseIssuance<AccountId, U>;
+	type OnDropDebt = IncreaseIssuance<AccountId, U>;
+	fn rescind(asset: Self::AssetId, amount: Self::Balance) -> Debt<AccountId, Self> {
+		U::set_total_issuance(asset, U::total_issuance(asset).saturating_sub(amount));
+		debt(asset, amount)
+	}
+	fn issue(asset: Self::AssetId, amount: Self::Balance) -> Credit<AccountId, Self> {
+		U::set_total_issuance(asset, U::total_issuance(asset).saturating_add(amount));
+		credit(asset, amount)
+	}
+	fn slash(
+		asset: Self::AssetId,
+		who: &AccountId,
+		amount: Self::Balance,
+	) -> (Credit<AccountId, Self>, Self::Balance) {
+		let slashed = U::decrease_balance_at_most(asset, who, amount);
+		// `slashed` could be less than, greater than or equal to `amount`.
+		// If slashed == amount, it means the account had at least amount in it and it could all be
+		// removed without a problem.
+		// If slashed > amount, it means the account had more than amount in it, but not enough more
+		// to push it over minimum_balance.
+		// If slashed < amount, it means the account didn't have enough in it to be reduced by
+		// `amount` without being destroyed.
+		(credit(asset, slashed), amount.saturating_sub(slashed))
+	}
+	fn deposit(
+		asset: Self::AssetId,
+		who: &AccountId,
+		amount: Self::Balance
+	) -> Result<Debt<AccountId, Self>, DispatchError> {
+		let increase = U::increase_balance(asset, who, amount)?;
+		Ok(debt(asset, increase))
+	}
+	fn withdraw(
+		asset: Self::AssetId,
+		who: &AccountId,
+		amount: Self::Balance,
+		//TODO: liveness: ExistenceRequirement,
+	) -> Result<Credit<AccountId, Self>, DispatchError> {
+		let decrease = U::decrease_balance(asset, who, amount)?;
+		Ok(credit(asset, decrease))
+	}
+}
diff --git a/frame/support/src/traits/tokens/fungibles/imbalance.rs b/frame/support/src/traits/tokens/fungibles/imbalance.rs
new file mode 100644
index 0000000000000..ecc415cb568bd
--- /dev/null
+++ b/frame/support/src/traits/tokens/fungibles/imbalance.rs
@@ -0,0 +1,169 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The imbalance type and its associates, which handles keeps everything adding up properly with
+//! unbalanced operations.
+
+use super::*;
+use sp_std::marker::PhantomData;
+use sp_runtime::traits::Zero;
+use super::fungibles::{AssetId, Balance};
+use super::balanced::Balanced;
+use crate::traits::misc::{TryDrop, SameOrOther};
+
+/// Handler for when an imbalance gets dropped. This could handle either a credit (negative) or
+/// debt (positive) imbalance.
+pub trait HandleImbalanceDrop<AssetId, Balance> {
+	fn handle(asset: AssetId, amount: Balance);
+}
+
+/// An imbalance in the system, representing a divergence of recorded token supply from the sum of
+/// the balances of all accounts. This is `must_use` in order to ensure it gets handled (placing
+/// into an account, settling from an account or altering the supply).
+///
+/// Importantly, it has a special `Drop` impl, and cannot be created outside of this module.
+#[must_use]
+pub struct Imbalance<
+	A: AssetId,
+	B: Balance,
+	OnDrop: HandleImbalanceDrop<A, B>,
+	OppositeOnDrop: HandleImbalanceDrop<A, B>,
+> {
+	asset: A,
+	amount: B,
+	_phantom: PhantomData<(OnDrop, OppositeOnDrop)>,
+}
+
+impl<
+	A: AssetId,
+	B: Balance,
+	OnDrop: HandleImbalanceDrop<A, B>,
+	OppositeOnDrop: HandleImbalanceDrop<A, B>
+> Drop for Imbalance<A, B, OnDrop, OppositeOnDrop> {
+	fn drop(&mut self) {
+		if !self.amount.is_zero() {
+			OnDrop::handle(self.asset, self.amount)
+		}
+	}
+}
+
+impl<
+	A: AssetId,
+	B: Balance,
+	OnDrop: HandleImbalanceDrop<A, B>,
+	OppositeOnDrop: HandleImbalanceDrop<A, B>,
+> TryDrop for Imbalance<A, B, OnDrop, OppositeOnDrop> {
+	/// Drop an instance cleanly. Only works if its value represents "no-operation".
+	fn try_drop(self) -> Result<(), Self> {
+		self.drop_zero()
+	}
+}
+
+impl<
+	A: AssetId,
+	B: Balance,
+	OnDrop: HandleImbalanceDrop<A, B>,
+	OppositeOnDrop: HandleImbalanceDrop<A, B>,
+> Imbalance<A, B, OnDrop, OppositeOnDrop> {
+	pub fn zero(asset: A) -> Self {
+		Self { asset, amount: Zero::zero(), _phantom: PhantomData }
+	}
+
+	pub(crate) fn new(asset: A, amount: B) -> Self {
+		Self { asset, amount, _phantom: PhantomData }
+	}
+
+	pub fn drop_zero(self) -> Result<(), Self> {
+		if self.amount.is_zero() {
+			sp_std::mem::forget(self);
+			Ok(())
+		} else {
+			Err(self)
+		}
+	}
+
+	pub fn split(self, amount: B) -> (Self, Self) {
+		let first = self.amount.min(amount);
+		let second = self.amount - first;
+		let asset = self.asset;
+		sp_std::mem::forget(self);
+		(Imbalance::new(asset, first), Imbalance::new(asset, second))
+	}
+	pub fn merge(mut self, other: Self) -> Result<Self, (Self, Self)> {
+		if self.asset == other.asset {
+			self.amount = self.amount.saturating_add(other.amount);
+			sp_std::mem::forget(other);
+			Ok(self)
+		} else {
+			Err((self, other))
+		}
+	}
+	pub fn subsume(&mut self, other: Self) -> Result<(), Self> {
+		if self.asset == other.asset {
+			self.amount = self.amount.saturating_add(other.amount);
+			sp_std::mem::forget(other);
+			Ok(())
+		} else {
+			Err(other)
+		}
+	}
+	pub fn offset(self, other: Imbalance<A, B, OppositeOnDrop, OnDrop>) -> Result<
+		SameOrOther<Self, Imbalance<A, B, OppositeOnDrop, OnDrop>>,
+		(Self, Imbalance<A, B, OppositeOnDrop, OnDrop>),
+	> {
+		if self.asset == other.asset {
+			let (a, b) = (self.amount, other.amount);
+			let asset = self.asset;
+			sp_std::mem::forget((self, other));
+
+			if a == b {
+				Ok(SameOrOther::None)
+			} else if a > b {
+				Ok(SameOrOther::Same(Imbalance::new(asset, a - b)))
+			} else {
+				Ok(SameOrOther::Other(Imbalance::<A, B, OppositeOnDrop, OnDrop>::new(asset, b - a)))
+			}
+		} else {
+			Err((self, other))
+		}
+	}
+	pub fn peek(&self) -> B {
+		self.amount
+	}
+
+	pub fn asset(&self) -> A {
+		self.asset
+	}
+}
+
+/// Imbalance implying that the total_issuance value is less than the sum of all account balances.
+pub type DebtOf<AccountId, B> = Imbalance<
+	<B as Inspect<AccountId>>::AssetId,
+	<B as Inspect<AccountId>>::Balance,
+	// This will generally be implemented by increasing the total_issuance value.
+	<B as Balanced<AccountId>>::OnDropDebt,
+	<B as Balanced<AccountId>>::OnDropCredit,
+>;
+
+/// Imbalance implying that the total_issuance value is greater than the sum of all account balances.
+pub type CreditOf<AccountId, B> = Imbalance<
+	<B as Inspect<AccountId>>::AssetId,
+	<B as Inspect<AccountId>>::Balance,
+	// This will generally be implemented by decreasing the total_issuance value.
+	<B as Balanced<AccountId>>::OnDropCredit,
+	<B as Balanced<AccountId>>::OnDropDebt,
+>;
diff --git a/frame/support/src/traits/tokens/imbalance.rs b/frame/support/src/traits/tokens/imbalance.rs
new file mode 100644
index 0000000000000..9652b9a0275a1
--- /dev/null
+++ b/frame/support/src/traits/tokens/imbalance.rs
@@ -0,0 +1,174 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! The imbalance trait type and its associates, which handles keeps everything adding up properly
+//! with unbalanced operations.
+
+use sp_std::ops::Div;
+use sp_runtime::traits::Saturating;
+use crate::traits::misc::{TryDrop, SameOrOther};
+
+mod split_two_ways;
+mod signed_imbalance;
+mod on_unbalanced;
+pub use split_two_ways::SplitTwoWays;
+pub use signed_imbalance::SignedImbalance;
+pub use on_unbalanced::OnUnbalanced;
+
+/// A trait for a not-quite Linear Type that tracks an imbalance.
+///
+/// Functions that alter account balances return an object of this trait to
+/// express how much account balances have been altered in aggregate. If
+/// dropped, the currency system will take some default steps to deal with
+/// the imbalance (`balances` module simply reduces or increases its
+/// total issuance). Your module should generally handle it in some way,
+/// good practice is to do so in a configurable manner using an
+/// `OnUnbalanced` type for each situation in which your module needs to
+/// handle an imbalance.
+///
+/// Imbalances can either be Positive (funds were added somewhere without
+/// being subtracted elsewhere - e.g. a reward) or Negative (funds deducted
+/// somewhere without an equal and opposite addition - e.g. a slash or
+/// system fee payment).
+///
+/// Since they are unsigned, the actual type is always Positive or Negative.
+/// The trait makes no distinction except to define the `Opposite` type.
+///
+/// New instances of zero value can be created (`zero`) and destroyed
+/// (`drop_zero`).
+///
+/// Existing instances can be `split` and merged either consuming `self` with
+/// `merge` or mutating `self` with `subsume`. If the target is an `Option`,
+/// then `maybe_merge` and `maybe_subsume` might work better. Instances can
+/// also be `offset` with an `Opposite` that is less than or equal to in value.
+///
+/// You can always retrieve the raw balance value using `peek`.
+#[must_use]
+pub trait Imbalance<Balance>: Sized + TryDrop + Default {
+	/// The oppositely imbalanced type. They come in pairs.
+	type Opposite: Imbalance<Balance>;
+
+	/// The zero imbalance. Can be destroyed with `drop_zero`.
+	fn zero() -> Self;
+
+	/// Drop an instance cleanly. Only works if its `self.value()` is zero.
+	fn drop_zero(self) -> Result<(), Self>;
+
+	/// Consume `self` and return two independent instances; the first
+	/// is guaranteed to be at most `amount` and the second will be the remainder.
+	fn split(self, amount: Balance) -> (Self, Self);
+
+	/// Consume `self` and return two independent instances; the amounts returned will be in
+	/// approximately the same ratio as `first`:`second`.
+	///
+	/// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should
+	/// fit into a `u32`. Overflow will safely saturate in both cases.
+	fn ration(self, first: u32, second: u32) -> (Self, Self)
+		where Balance: From<u32> + Saturating + Div<Balance, Output=Balance>
+	{
+		let total: u32 = first.saturating_add(second);
+		if total == 0 { return (Self::zero(), Self::zero()) }
+		let amount1 = self.peek().saturating_mul(first.into()) / total.into();
+		self.split(amount1)
+	}
+
+	/// Consume self and add its two components, defined by the first component's balance,
+	/// element-wise to two pre-existing Imbalances.
+	///
+	/// A convenient replacement for `split` and `merge`.
+	fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) {
+		let (a, b) = self.split(amount);
+		(a.merge(others.0), b.merge(others.1))
+	}
+
+	/// Consume self and add its two components, defined by the ratio `first`:`second`,
+	/// element-wise to two pre-existing Imbalances.
+	///
+	/// A convenient replacement for `split` and `merge`.
+	fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self)
+		where Balance: From<u32> + Saturating + Div<Balance, Output=Balance>
+	{
+		let (a, b) = self.ration(first, second);
+		(a.merge(others.0), b.merge(others.1))
+	}
+
+	/// Consume self and add its two components, defined by the first component's balance,
+	/// element-wise into two pre-existing Imbalance refs.
+	///
+	/// A convenient replacement for `split` and `subsume`.
+	fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) {
+		let (a, b) = self.split(amount);
+		others.0.subsume(a);
+		others.1.subsume(b);
+	}
+
+	/// Consume self and add its two components, defined by the ratio `first`:`second`,
+	/// element-wise to two pre-existing Imbalances.
+	///
+	/// A convenient replacement for `split` and `merge`.
+	fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self))
+		where Balance: From<u32> + Saturating + Div<Balance, Output=Balance>
+	{
+		let (a, b) = self.ration(first, second);
+		others.0.subsume(a);
+		others.1.subsume(b);
+	}
+
+	/// Consume `self` and an `other` to return a new instance that combines
+	/// both.
+	fn merge(self, other: Self) -> Self;
+
+	/// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with
+	/// reversed arguments.
+	fn merge_into(self, other: &mut Self) {
+		other.subsume(self)
+	}
+
+	/// Consume `self` and maybe an `other` to return a new instance that combines
+	/// both.
+	fn maybe_merge(self, other: Option<Self>) -> Self {
+		if let Some(o) = other {
+			self.merge(o)
+		} else {
+			self
+		}
+	}
+
+	/// Consume an `other` to mutate `self` into a new instance that combines
+	/// both.
+	fn subsume(&mut self, other: Self);
+
+	/// Maybe consume an `other` to mutate `self` into a new instance that combines
+	/// both.
+	fn maybe_subsume(&mut self, other: Option<Self>) {
+		if let Some(o) = other {
+			self.subsume(o)
+		}
+	}
+
+	/// Consume self and along with an opposite counterpart to return
+	/// a combined result.
+	///
+	/// Returns `Ok` along with a new instance of `Self` if this instance has a
+	/// greater value than the `other`. Otherwise returns `Err` with an instance of
+	/// the `Opposite`. In both cases the value represents the combination of `self`
+	/// and `other`.
+	fn offset(self, other: Self::Opposite)-> SameOrOther<Self, Self::Opposite>;
+
+	/// The raw value of self.
+	fn peek(&self) -> Balance;
+}
diff --git a/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs b/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs
new file mode 100644
index 0000000000000..f3ecc14308e74
--- /dev/null
+++ b/frame/support/src/traits/tokens/imbalance/on_unbalanced.rs
@@ -0,0 +1,50 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Trait for handling imbalances.
+
+use crate::traits::misc::TryDrop;
+
+/// Handler for when some currency "account" decreased in balance for
+/// some reason.
+///
+/// The only reason at present for an increase would be for validator rewards, but
+/// there may be other reasons in the future or for other chains.
+///
+/// Reasons for decreases include:
+///
+/// - Someone got slashed.
+/// - Someone paid for a transaction to be included.
+pub trait OnUnbalanced<Imbalance: TryDrop> {
+	/// Handler for some imbalances. The different imbalances might have different origins or
+	/// meanings, dependent on the context. Will default to simply calling on_unbalanced for all
+	/// of them. Infallible.
+	fn on_unbalanceds<B>(amounts: impl Iterator<Item=Imbalance>) where Imbalance: crate::traits::Imbalance<B> {
+		Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i)))
+	}
+
+	/// Handler for some imbalance. Infallible.
+	fn on_unbalanced(amount: Imbalance) {
+		amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced)
+	}
+
+	/// Actually handle a non-zero imbalance. You probably want to implement this rather than
+	/// `on_unbalanced`.
+	fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); }
+}
+
+impl<Imbalance: TryDrop> OnUnbalanced<Imbalance> for () {}
diff --git a/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs
new file mode 100644
index 0000000000000..e3523f86804fd
--- /dev/null
+++ b/frame/support/src/traits/tokens/imbalance/signed_imbalance.rs
@@ -0,0 +1,69 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Convenience type for managing an imbalance whose sign is unknown.
+
+use codec::FullCodec;
+use sp_std::fmt::Debug;
+use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize};
+use crate::traits::misc::SameOrOther;
+use super::super::imbalance::Imbalance;
+
+/// Either a positive or a negative imbalance.
+pub enum SignedImbalance<B, PositiveImbalance: Imbalance<B>>{
+	/// A positive imbalance (funds have been created but none destroyed).
+	Positive(PositiveImbalance),
+	/// A negative imbalance (funds have been destroyed but none created).
+	Negative(PositiveImbalance::Opposite),
+}
+
+impl<
+	P: Imbalance<B, Opposite=N>,
+	N: Imbalance<B, Opposite=P>,
+	B: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default,
+> SignedImbalance<B, P> {
+	/// Create a `Positive` instance of `Self` whose value is zero.
+ pub fn zero() -> Self { + SignedImbalance::Positive(P::zero()) + } + + /// Drop `Self` if and only if it is equal to zero. Return `Err` with `Self` if not. + pub fn drop_zero(self) -> Result<(), Self> { + match self { + SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), + SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), + } + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + pub fn merge(self, other: Self) -> Self { + match (self, other) { + (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => + SignedImbalance::Positive(one.merge(other)), + (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => + SignedImbalance::Negative(one.merge(other)), + (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => + match one.offset(other) { + SameOrOther::Same(positive) => SignedImbalance::Positive(positive), + SameOrOther::Other(negative) => SignedImbalance::Negative(negative), + SameOrOther::None => SignedImbalance::Positive(P::zero()), + }, + (one, other) => other.merge(one), + } + } +} diff --git a/frame/support/src/traits/tokens/imbalance/split_two_ways.rs b/frame/support/src/traits/tokens/imbalance/split_two_ways.rs new file mode 100644 index 0000000000000..f3f9870b62cd2 --- /dev/null +++ b/frame/support/src/traits/tokens/imbalance/split_two_ways.rs @@ -0,0 +1,51 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Means for splitting an imbalance into two and hanlding them differently.
+
+use sp_std::{ops::Div, marker::PhantomData};
+use sp_core::u32_trait::Value as U32;
+use sp_runtime::traits::Saturating;
+use super::super::imbalance::{Imbalance, OnUnbalanced};
+
+/// Split an unbalanced amount two ways between a common divisor.
+pub struct SplitTwoWays<
+	Balance,
+	Imbalance,
+	Part1,
+	Target1,
+	Part2,
+	Target2,
+>(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>);
+
+impl<
+	Balance: From<u32> + Saturating + Div<Balance, Output=Balance>,
+	I: Imbalance<Balance>,
+	Part1: U32,
+	Target1: OnUnbalanced<I>,
+	Part2: U32,
+	Target2: OnUnbalanced<I>,
+> OnUnbalanced<I> for SplitTwoWays<Balance, I, Part1, Target1, Part2, Target2>
+{
+	fn on_nonzero_unbalanced(amount: I) {
+		let total: u32 = Part1::VALUE + Part2::VALUE;
+		let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into();
+		let (imb1, imb2) = amount.split(amount1);
+		Target1::on_unbalanced(imb1);
+		Target2::on_unbalanced(imb2);
+	}
+}
diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs
new file mode 100644
index 0000000000000..9871123abd595
--- /dev/null
+++ b/frame/support/src/traits/tokens/misc.rs
@@ -0,0 +1,168 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Miscellaneous types.
+
+use codec::{Encode, Decode, FullCodec};
+use sp_core::RuntimeDebug;
+use sp_arithmetic::traits::{Zero, AtLeast32BitUnsigned};
+use sp_runtime::TokenError;
+
+/// One of a number of consequences of withdrawing a fungible from an account.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum WithdrawConsequence<Balance> {
+	/// Withdraw could not happen since the amount to be withdrawn is less than the total funds in
+	/// the account.
+	NoFunds,
+	/// The withdraw would mean the account dying when it needs to exist (usually because it is a
+	/// provider and there are consumer references on it).
+	WouldDie,
+	/// The asset is unknown. Usually because an `AssetId` has been presented which doesn't exist
+	/// on the system.
+	UnknownAsset,
+	/// There has been an underflow in the system. This is indicative of a corrupt state and
+	/// likely unrecoverable.
+	Underflow,
+	/// There has been an overflow in the system. This is indicative of a corrupt state and
+	/// likely unrecoverable.
+	Overflow,
+	/// Not enough of the funds in the account are unavailable for withdrawal.
+	Frozen,
+	/// Account balance would reduce to zero, potentially destroying it. The parameter is the
+	/// amount of balance which is destroyed.
+	ReducedToZero(Balance),
+	/// Account continued in existence.
+	Success,
+}
+
+impl<Balance: Zero> WithdrawConsequence<Balance> {
+	/// Convert the type into a `Result` with `TokenError` as the error or the additional `Balance`
+	/// by which the account will be reduced.
+	pub fn into_result(self) -> Result<Balance, TokenError> {
+		use WithdrawConsequence::*;
+		match self {
+			NoFunds => Err(TokenError::NoFunds),
+			WouldDie => Err(TokenError::WouldDie),
+			UnknownAsset => Err(TokenError::UnknownAsset),
+			Underflow => Err(TokenError::Underflow),
+			Overflow => Err(TokenError::Overflow),
+			Frozen => Err(TokenError::Frozen),
+			ReducedToZero(result) => Ok(result),
+			Success => Ok(Zero::zero()),
+		}
+	}
+}
+
+/// One of a number of consequences of withdrawing a fungible from an account.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum DepositConsequence {
+	/// Deposit couldn't happen due to the amount being too low. This is usually because the
+	/// account doesn't yet exist and the deposit wouldn't bring it to at least the minimum needed
+	/// for existance.
+	BelowMinimum,
+	/// Deposit cannot happen since the account cannot be created (usually because it's a consumer
+	/// and there exists no provider reference).
+	CannotCreate,
+	/// The asset is unknown. Usually because an `AssetId` has been presented which doesn't exist
+	/// on the system.
+	UnknownAsset,
+	/// An overflow would occur. This is practically unexpected, but could happen in test systems
+	/// with extremely small balance types or balances that approach the max value of the balance
+	/// type.
+	Overflow,
+	/// Account continued in existence.
+	Success,
+}
+
+impl DepositConsequence {
+	/// Convert the type into a `Result` with `TokenError` as the error.
+	pub fn into_result(self) -> Result<(), TokenError> {
+		use DepositConsequence::*;
+		Err(match self {
+			BelowMinimum => TokenError::BelowMinimum,
+			CannotCreate => TokenError::CannotCreate,
+			UnknownAsset => TokenError::UnknownAsset,
+			Overflow => TokenError::Overflow,
+			Success => return Ok(()),
+		})
+	}
+}
+
+/// Simple boolean for whether an account needs to be kept in existence.
+#[derive(Copy, Clone, Eq, PartialEq)]
+pub enum ExistenceRequirement {
+	/// Operation must not result in the account going out of existence.
+	///
+	/// Note this implies that if the account never existed in the first place, then the operation
+	/// may legitimately leave the account unchanged and still non-existent.
+	KeepAlive,
+	/// Operation may result in account going out of existence.
+	AllowDeath,
+}
+
+/// Status of funds.
+#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)]
+pub enum BalanceStatus {
+	/// Funds are free, as corresponding to `free` item in Balances.
+	Free,
+	/// Funds are reserved, as corresponding to `reserved` item in Balances.
+	Reserved,
+}
+
+bitflags::bitflags! {
+	/// Reasons for moving funds out of an account.
+	#[derive(Encode, Decode)]
+	pub struct WithdrawReasons: u8 {
+		/// In order to pay for (system) transaction costs.
+		const TRANSACTION_PAYMENT = 0b00000001;
+		/// In order to transfer ownership.
+		const TRANSFER = 0b00000010;
+		/// In order to reserve some funds for a later return or repatriation.
+		const RESERVE = 0b00000100;
+		/// In order to pay some other (higher-level) fees.
+		const FEE = 0b00001000;
+		/// In order to tip a validator for transaction inclusion.
+		const TIP = 0b00010000;
+	}
+}
+
+impl WithdrawReasons {
+	/// Choose all variants except for `one`.
+	///
+	/// ```rust
+	/// # use frame_support::traits::WithdrawReasons;
+	/// # fn main() {
+	/// assert_eq!(
+	/// 	WithdrawReasons::FEE | WithdrawReasons::TRANSFER | WithdrawReasons::RESERVE | WithdrawReasons::TIP,
+	/// 	WithdrawReasons::except(WithdrawReasons::TRANSACTION_PAYMENT),
+	/// );
+	/// # }
+	/// ```
+	pub fn except(one: WithdrawReasons) -> WithdrawReasons {
+		let mut flags = Self::all();
+		flags.toggle(one);
+		flags
+	}
+}
+
+/// Simple amalgamation trait to collect together properties for an AssetId under one roof.
+pub trait AssetId: FullCodec + Copy + Default + Eq + PartialEq {}
+impl<T: FullCodec + Copy + Default + Eq + PartialEq> AssetId for T {}
+
+/// Simple amalgamation trait to collect together properties for a Balance under one roof.
+pub trait Balance: AtLeast32BitUnsigned + FullCodec + Copy + Default {}
+impl<T: AtLeast32BitUnsigned + FullCodec + Copy + Default> Balance for T {}
diff --git a/frame/support/src/traits/validation.rs b/frame/support/src/traits/validation.rs
new file mode 100644
index 0000000000000..900be7bb8e7e2
--- /dev/null
+++ b/frame/support/src/traits/validation.rs
@@ -0,0 +1,242 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Traits for dealing with validation and validators.
+
+use sp_std::prelude::*;
+use codec::{Codec, Decode};
+use sp_runtime::traits::{Convert, Zero};
+use sp_runtime::{BoundToRuntimeAppPublic, ConsensusEngineId, Percent, RuntimeAppPublic};
+use sp_staking::SessionIndex;
+use crate::dispatch::Parameter;
+use crate::weights::Weight;
+
+/// A trait for online node inspection in a session.
+///
+/// Something that can give information about the current validator set.
+pub trait ValidatorSet<AccountId> {
+	/// Type for representing validator id in a session.
+	type ValidatorId: Parameter;
+	/// A type for converting `AccountId` to `ValidatorId`.
+	type ValidatorIdOf: Convert<AccountId, Option<Self::ValidatorId>>;
+
+	/// Returns current session index.
+	fn session_index() -> SessionIndex;
+
+	/// Returns the active set of validators.
+	fn validators() -> Vec<Self::ValidatorId>;
+}
+
+/// [`ValidatorSet`] combined with an identification.
+pub trait ValidatorSetWithIdentification<AccountId>: ValidatorSet<AccountId> {
+	/// Full identification of `ValidatorId`.
+	type Identification: Parameter;
+	/// A type for converting `ValidatorId` to `Identification`.
+	type IdentificationOf: Convert<Self::ValidatorId, Option<Self::Identification>>;
+}
+
+/// A trait for finding the author of a block header based on the `PreRuntime` digests contained
+/// within it.
+pub trait FindAuthor<Author> {
+	/// Find the author of a block based on the pre-runtime digests.
+	fn find_author<'a, I>(digests: I) -> Option<Author>
+		where I: 'a + IntoIterator<Item=(ConsensusEngineId, &'a [u8])>;
+}
+
+impl<A> FindAuthor<A> for () {
+	fn find_author<'a, I>(_: I) -> Option<A>
+		where I: 'a + IntoIterator<Item=(ConsensusEngineId, &'a [u8])>
+	{
+		None
+	}
+}
+
+/// A trait for verifying the seal of a header and returning the author.
+pub trait VerifySeal<Header, Author> {
+	/// Verify a header and return the author, if any.
+	fn verify_seal(header: &Header) -> Result<Option<Author>, &'static str>;
+}
+
+/// A session handler for specific key type.
+pub trait OneSessionHandler<ValidatorId>: BoundToRuntimeAppPublic {
+	/// The key type expected.
+	type Key: Decode + Default + RuntimeAppPublic;
+
+	/// The given validator set will be used for the genesis session.
+	/// It is guaranteed that the given validator set will also be used
+	/// for the second session, therefore the first call to `on_new_session`
+	/// should provide the same validator set.
+	fn on_genesis_session<'a, I: 'a>(validators: I)
+		where I: Iterator<Item=(&'a ValidatorId, Self::Key)>, ValidatorId: 'a;
+
+	/// Session set has changed; act appropriately. Note that this can be called
+	/// before initialization of your module.
+	///
+	/// `changed` is true when at least one of the session keys
+	/// or the underlying economic identities/distribution behind one the
+	/// session keys has changed, false otherwise.
+	///
+	/// The `validators` are the validators of the incoming session, and `queued_validators`
+	/// will follow.
+	fn on_new_session<'a, I: 'a>(
+		changed: bool,
+		validators: I,
+		queued_validators: I,
+	) where I: Iterator<Item=(&'a ValidatorId, Self::Key)>, ValidatorId: 'a;
+
+	/// A notification for end of the session.
+	///
+	/// Note it is triggered before any `SessionManager::end_session` handlers,
+	/// so we can still affect the validator set.
+	fn on_before_session_ending() {}
+
+	/// A validator got disabled. Act accordingly until a new session begins.
+	fn on_disabled(_validator_index: usize);
+}
+
+/// Something that can estimate at which block the next session rotation will happen (i.e. a new
+/// session starts).
+///
+/// The accuracy of the estimates is dependent on the specific implementation, but in order to get
+/// the best estimate possible these methods should be called throughout the duration of the session
+/// (rather than calling once and storing the result).
+///
+/// This should be the same logical unit that dictates `ShouldEndSession` to the session module. No
+/// assumptions are made about the scheduling of the sessions.
+pub trait EstimateNextSessionRotation<BlockNumber> {
+	/// Return the average length of a session.
+	///
+	/// This may or may not be accurate.
+	fn average_session_length() -> BlockNumber;
+
+	/// Return an estimate of the current session progress.
+	///
+	/// None should be returned if the estimation fails to come to an answer.
+	fn estimate_current_session_progress(now: BlockNumber) -> (Option<Percent>, Weight);
+
+	/// Return the block number at which the next session rotation is estimated to happen.
+	///
+	/// None should be returned if the estimation fails to come to an answer.
+	fn estimate_next_session_rotation(now: BlockNumber) -> (Option<BlockNumber>, Weight);
+}
+
+impl<BlockNumber: Zero> EstimateNextSessionRotation<BlockNumber> for () {
+	fn average_session_length() -> BlockNumber {
+		Zero::zero()
+	}
+
+	fn estimate_current_session_progress(_: BlockNumber) -> (Option<Percent>, Weight) {
+		(None, Zero::zero())
+	}
+
+	fn estimate_next_session_rotation(_: BlockNumber) -> (Option<BlockNumber>, Weight) {
+		(None, Zero::zero())
+	}
+}
+
+/// Something that can estimate at which block scheduling of the next session will happen (i.e when
+/// we will try to fetch new validators).
+///
+/// This only refers to the point when we fetch the next session details and not when we enact them
+/// (for enactment there's `EstimateNextSessionRotation`). With `pallet-session` this should be
+/// triggered whenever `SessionManager::new_session` is called.
+///
+/// For example, if we are using a staking module this would be the block when the session module
+/// would ask staking what the next validator set will be, as such this must always be implemented
+/// by the session module.
+pub trait EstimateNextNewSession<BlockNumber> {
+	/// Return the average length of a session.
+	///
+	/// This may or may not be accurate.
+	fn average_session_length() -> BlockNumber;
+
+	/// Return the block number at which the next new session is estimated to happen.
+	///
+	/// None should be returned if the estimation fails to come to an answer.
+	fn estimate_next_new_session(_: BlockNumber) -> (Option<BlockNumber>, Weight);
+}
+
+impl<BlockNumber: Zero> EstimateNextNewSession<BlockNumber> for () {
+	fn average_session_length() -> BlockNumber {
+		Zero::zero()
+	}
+
+	fn estimate_next_new_session(_: BlockNumber) -> (Option<BlockNumber>, Weight) {
+		(None, Zero::zero())
+	}
+}
+
+/// Something which can compute and check proofs of
+/// a historical key owner and return full identification data of that
+/// key owner.
+pub trait KeyOwnerProofSystem<Key> {
+	/// The proof of membership itself.
+	type Proof: Codec;
+	/// The full identification of a key owner and the stash account.
+	type IdentificationTuple: Codec;
+
+	/// Prove membership of a key owner in the current block-state.
+	///
+	/// This should typically only be called off-chain, since it may be
+	/// computationally heavy.
+	///
+	/// Returns `Some` iff the key owner referred to by the given `key` is a
+	/// member of the current set.
+	fn prove(key: Key) -> Option<Self::Proof>;
+
+	/// Check a proof of membership on-chain. Return `Some` iff the proof is
+	/// valid and recent enough to check.
+ fn check_proof(key: Key, proof: Self::Proof) -> Option; +} + +impl KeyOwnerProofSystem for () { + // The proof and identification tuples is any bottom type to guarantee that the methods of this + // implementation can never be called or return anything other than `None`. + type Proof = crate::Void; + type IdentificationTuple = crate::Void; + + fn prove(_key: Key) -> Option { + None + } + + fn check_proof(_key: Key, _proof: Self::Proof) -> Option { + None + } +} + +/// Trait to be used by block producing consensus engine modules to determine +/// how late the current block is (e.g. in a slot-based proposal mechanism how +/// many slots were skipped since the previous block). +pub trait Lateness { + /// Returns a generic measure of how late the current block is compared to + /// its parent. + fn lateness(&self) -> N; +} + +impl Lateness for () { + fn lateness(&self) -> N { + Zero::zero() + } +} + +/// Implementors of this trait provide information about whether or not some validator has +/// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. +pub trait ValidatorRegistration { + /// Returns true if the provided validator ID has been registered with the implementing runtime + /// module + fn is_registered(id: &ValidatorId) -> bool; +} diff --git a/frame/support/src/traits/voting.rs b/frame/support/src/traits/voting.rs new file mode 100644 index 0000000000000..b6913a182d30b --- /dev/null +++ b/frame/support/src/traits/voting.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Traits and associated data structures concerned with voting, and moving between tokens and +//! votes. + +use sp_arithmetic::traits::{UniqueSaturatedInto, UniqueSaturatedFrom, SaturatedConversion}; + +/// A trait similar to `Convert` to convert values from `B` an abstract balance type +/// into u64 and back from u128. (This conversion is used in election and other places where complex +/// calculation over balance type is needed) +/// +/// Total issuance of the currency is passed in, but an implementation of this trait may or may not +/// use it. +/// +/// # WARNING +/// +/// the total issuance being passed in implies that the implementation must be aware of the fact +/// that its values can affect the outcome. This implies that if the vote value is dependent on the +/// total issuance, it should never ber written to storage for later re-use. +pub trait CurrencyToVote { + /// Convert balance to u64. + fn to_vote(value: B, issuance: B) -> u64; + + /// Convert u128 to balance. + fn to_currency(value: u128, issuance: B) -> B; +} + +/// An implementation of `CurrencyToVote` tailored for chain's that have a balance type of u128. +/// +/// The factor is the `(total_issuance / u64::max()).max(1)`, represented as u64. Let's look at the +/// important cases: +/// +/// If the chain's total issuance is less than u64::max(), this will always be 1, which means that +/// the factor will not have any effect. In this case, any account's balance is also less. Thus, +/// both of the conversions are basically an `as`; Any balance can fit in u64. 
+/// +/// If the chain's total issuance is more than 2*u64::max(), then a factor might be multiplied and +/// divided upon conversion. +pub struct U128CurrencyToVote; + +impl U128CurrencyToVote { + fn factor(issuance: u128) -> u128 { + (issuance / u64::max_value() as u128).max(1) + } +} + +impl CurrencyToVote for U128CurrencyToVote { + fn to_vote(value: u128, issuance: u128) -> u64 { + (value / Self::factor(issuance)).saturated_into() + } + + fn to_currency(value: u128, issuance: u128) -> u128 { + value.saturating_mul(Self::factor(issuance)) + } +} + + +/// A naive implementation of `CurrencyConvert` that simply saturates all conversions. +/// +/// # Warning +/// +/// This is designed to be used mostly for testing. Use with care, and think about the consequences. +pub struct SaturatingCurrencyToVote; + +impl + UniqueSaturatedFrom> CurrencyToVote for SaturatingCurrencyToVote { + fn to_vote(value: B, _: B) -> u64 { + value.unique_saturated_into() + } + + fn to_currency(value: u128, _: B) -> B { + B::unique_saturated_from(value) + } +} diff --git a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr index c5319da851078..7648f5c1bfb33 100644 --- a/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr +++ b/frame/support/test/tests/construct_runtime_ui/missing_system_module.stderr @@ -1,7 +1,6 @@ error: `System` pallet declaration is missing. 
Please add this line: `System: frame_system::{Pallet, Call, Storage, Config, Event},` --> $DIR/missing_system_module.rs:8:2 | -8 | { - | _____^ +8 | / { 9 | | } | |_____^ diff --git a/frame/support/test/tests/derive_no_bound.rs b/frame/support/test/tests/derive_no_bound.rs index b96fbcfba931d..3081a332b72c1 100644 --- a/frame/support/test/tests/derive_no_bound.rs +++ b/frame/support/test/tests/derive_no_bound.rs @@ -15,9 +15,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Tests for DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, and RuntimeDebugNoBound +//! Tests for DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound, and +//! RuntimeDebugNoBound -use frame_support::{DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; +use frame_support::{ + DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DefaultNoBound, +}; #[derive(RuntimeDebugNoBound)] struct Unnamed(u64); @@ -29,7 +32,7 @@ fn runtime_debug_no_bound_display_correctly() { } trait Config { - type C: std::fmt::Debug + Clone + Eq + PartialEq; + type C: std::fmt::Debug + Clone + Eq + PartialEq + Default; } struct Runtime; @@ -39,7 +42,7 @@ impl Config for Runtime { type C = u32; } -#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] struct StructNamed { a: u32, b: u64, @@ -56,6 +59,12 @@ fn test_struct_named() { phantom: Default::default(), }; + let a_default: StructNamed:: = Default::default(); + assert_eq!(a_default.a, 0); + assert_eq!(a_default.b, 0); + assert_eq!(a_default.c, 0); + assert_eq!(a_default.phantom, Default::default()); + let a_2 = a_1.clone(); assert_eq!(a_2.a, 1); assert_eq!(a_2.b, 2); @@ -76,7 +85,7 @@ fn test_struct_named() { assert!(b != a_1); } -#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] +#[derive(DebugNoBound, 
CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] struct StructUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>); #[test] @@ -88,6 +97,12 @@ fn test_struct_unnamed() { Default::default(), ); + let a_default: StructUnnamed:: = Default::default(); + assert_eq!(a_default.0, 0); + assert_eq!(a_default.1, 0); + assert_eq!(a_default.2, 0); + assert_eq!(a_default.3, Default::default()); + let a_2 = a_1.clone(); assert_eq!(a_2.0, 1); assert_eq!(a_2.1, 2); @@ -108,7 +123,7 @@ fn test_struct_unnamed() { assert!(b != a_1); } -#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound)] +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] enum Enum { VariantUnnamed(u32, u64, T::C, core::marker::PhantomData<(U, V)>), VariantNamed { @@ -121,6 +136,32 @@ enum Enum { VariantUnit2, } +// enum that will have a named default. +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +enum Enum2 { + VariantNamed { + a: u32, + b: u64, + c: T::C, + }, + VariantUnnamed(u32, u64, T::C), + VariantUnit, + VariantUnit2, +} + +// enum that will have a unit default. +#[derive(DebugNoBound, CloneNoBound, EqNoBound, PartialEqNoBound, DefaultNoBound)] +enum Enum3 { + VariantUnit, + VariantNamed { + a: u32, + b: u64, + c: T::C, + }, + VariantUnnamed(u32, u64, T::C), + VariantUnit2, +} + #[test] fn test_enum() { type TestEnum = Enum::; @@ -131,6 +172,22 @@ fn test_enum() { let variant_2 = TestEnum::VariantUnit; let variant_3 = TestEnum::VariantUnit2; + let default: TestEnum = Default::default(); + assert_eq!( + default, + // first variant is default. 
+ TestEnum::VariantUnnamed(0, 0, 0, Default::default()) + ); + + assert_eq!( + Enum2::::default(), + Enum2::::VariantNamed { a: 0, b: 0, c: 0}, + ); + assert_eq!( + Enum3::::default(), + Enum3::::VariantUnit, + ); + assert!(variant_0 != variant_0_bis); assert!(variant_1 != variant_1_bis); assert!(variant_0 != variant_1); diff --git a/frame/support/test/tests/derive_no_bound_ui/default.rs b/frame/support/test/tests/derive_no_bound_ui/default.rs new file mode 100644 index 0000000000000..0780a88e6753d --- /dev/null +++ b/frame/support/test/tests/derive_no_bound_ui/default.rs @@ -0,0 +1,10 @@ +trait Config { + type C; +} + +#[derive(frame_support::DefaultNoBound)] +struct Foo { + c: T::C, +} + +fn main() {} diff --git a/frame/support/test/tests/derive_no_bound_ui/default.stderr b/frame/support/test/tests/derive_no_bound_ui/default.stderr new file mode 100644 index 0000000000000..d58b5e9185268 --- /dev/null +++ b/frame/support/test/tests/derive_no_bound_ui/default.stderr @@ -0,0 +1,7 @@ +error[E0277]: the trait bound `::C: std::default::Default` is not satisfied + --> $DIR/default.rs:7:2 + | +7 | c: T::C, + | ^ the trait `std::default::Default` is not implemented for `::C` + | + = note: required by `std::default::Default::default` diff --git a/frame/support/test/tests/derive_no_bound_ui/eq.stderr b/frame/support/test/tests/derive_no_bound_ui/eq.stderr index bbd907adecb33..36384178d469b 100644 --- a/frame/support/test/tests/derive_no_bound_ui/eq.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/eq.stderr @@ -4,4 +4,9 @@ error[E0277]: can't compare `Foo` with `Foo` 6 | struct Foo { | ^^^ no implementation for `Foo == Foo` | + ::: $RUST/core/src/cmp.rs + | + | pub trait Eq: PartialEq { + | --------------- required by this bound in `Eq` + | = help: the trait `PartialEq` is not implemented for `Foo` diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index e0dd1d1891d26..dbffead8ad2b0 100644 --- 
a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -122,6 +122,10 @@ mod module1 { fn check_inherent(_: &Self::Call, _: &InherentData) -> std::result::Result<(), Self::Error> { unimplemented!(); } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!(); + } } } @@ -182,6 +186,10 @@ mod module2 { fn check_inherent(_call: &Self::Call, _data: &InherentData) -> std::result::Result<(), Self::Error> { unimplemented!(); } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!(); + } } } diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 5387312819c87..8fc056a2f36a5 100644 --- a/frame/support/test/tests/pallet.rs +++ b/frame/support/test/tests/pallet.rs @@ -288,6 +288,10 @@ pub mod pallet { T::AccountId::from(SomeType6); // Test for where clause unimplemented!(); } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!(); + } } #[derive(codec::Encode, sp_runtime::RuntimeDebug)] @@ -365,6 +369,25 @@ pub mod pallet2 { } } +/// Test that the supertrait check works when we pass some parameter to the `frame_system::Config`. 
+#[frame_support::pallet] +pub mod pallet3 { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} +} + frame_support::parameter_types!( pub const MyGetParam: u32= 10; pub const MyGetParam2: u32= 11; @@ -395,6 +418,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet::Config for Runtime { type Event = Event; @@ -837,3 +861,14 @@ fn metadata() { pretty_assertions::assert_eq!(pallet_metadata, expected_pallet_metadata); } + +#[test] +fn test_pallet_info_access() { + assert_eq!(::name(), "System"); + assert_eq!(::name(), "Example"); + assert_eq!(::name(), "Example2"); + + assert_eq!(::index(), 0); + assert_eq!(::index(), 1); + assert_eq!(::index(), 2); +} diff --git a/frame/support/test/tests/pallet_compatibility.rs b/frame/support/test/tests/pallet_compatibility.rs index 95e1c027eb3fa..a953b19607d90 100644 --- a/frame/support/test/tests/pallet_compatibility.rs +++ b/frame/support/test/tests/pallet_compatibility.rs @@ -225,6 +225,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet::Config for Runtime { type Event = Event; diff --git a/frame/support/test/tests/pallet_compatibility_instance.rs b/frame/support/test/tests/pallet_compatibility_instance.rs index 603c583ae217f..5ce20012c736d 100644 --- a/frame/support/test/tests/pallet_compatibility_instance.rs +++ b/frame/support/test/tests/pallet_compatibility_instance.rs @@ -217,6 +217,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet::Config for Runtime { type Event = Event; diff --git 
a/frame/support/test/tests/pallet_instance.rs b/frame/support/test/tests/pallet_instance.rs index 1bf4c1af09280..232a25ff5bf27 100644 --- a/frame/support/test/tests/pallet_instance.rs +++ b/frame/support/test/tests/pallet_instance.rs @@ -170,6 +170,10 @@ pub mod pallet { fn create_inherent(_data: &InherentData) -> Option { unimplemented!(); } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!(); + } } #[derive(codec::Encode, sp_runtime::RuntimeDebug)] @@ -260,6 +264,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet::Config for Runtime { type Event = Event; @@ -707,3 +712,18 @@ fn metadata() { pretty_assertions::assert_eq!(pallet_metadata, expected_pallet_metadata); pretty_assertions::assert_eq!(pallet_instance1_metadata, expected_pallet_instance1_metadata); } + +#[test] +fn test_pallet_info_access() { + assert_eq!(::name(), "System"); + assert_eq!(::name(), "Example"); + assert_eq!(::name(), "Instance1Example"); + assert_eq!(::name(), "Example2"); + assert_eq!(::name(), "Instance1Example2"); + + assert_eq!(::index(), 0); + assert_eq!(::index(), 1); + assert_eq!(::index(), 2); + assert_eq!(::index(), 3); + assert_eq!(::index(), 4); +} diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index a2998788736ac..8a6ee8b8f5045 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -1,10 +1,10 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is not satisfied - --> $DIR/genesis_default_not_satisfied.rs:22:18 - | -22 | impl GenesisBuild for GenesisConfig {} - | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` - | - ::: $WORKSPACE/frame/support/src/traits.rs - | - 
| pub trait GenesisBuild: Default + MaybeSerializeDeserialize { - | ------- required by this bound in `GenesisBuild` + --> $DIR/genesis_default_not_satisfied.rs:22:18 + | +22 | impl GenesisBuild for GenesisConfig {} + | ^^^^^^^^^^^^^^^ the trait `std::default::Default` is not implemented for `pallet::GenesisConfig` + | + ::: $WORKSPACE/frame/support/src/traits/hooks.rs + | + | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | ------- required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index 0379448f694fc..3812b433e20ca 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -1,5 +1,15 @@ -error[E0107]: wrong number of type arguments: expected 1, found 0 - --> $DIR/hooks_invalid_item.rs:12:18 - | -12 | impl Hooks for Pallet {} - | ^^^^^ expected 1 type argument +error[E0107]: missing generics for trait `Hooks` + --> $DIR/hooks_invalid_item.rs:12:18 + | +12 | impl Hooks for Pallet {} + | ^^^^^ expected 1 type argument + | +note: trait defined here, with 1 type parameter: `BlockNumber` + --> $DIR/hooks.rs:206:11 + | +206 | pub trait Hooks { + | ^^^^^ ----------- +help: use angle brackets to add missing type argument + | +12 | impl Hooks for Pallet {} + | ^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr index 75a522889ebd9..bc34c55241a76 100644 --- a/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr +++ b/frame/support/test/tests/pallet_ui/inherent_check_inner_span.stderr @@ -1,10 +1,11 @@ -error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent` +error[E0046]: not all trait items implemented, missing: `Call`, `Error`, `INHERENT_IDENTIFIER`, 
`create_inherent`, `is_inherent` --> $DIR/inherent_check_inner_span.rs:19:2 | 19 | impl ProvideInherent for Pallet {} - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent` in implementation + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ missing `Call`, `Error`, `INHERENT_IDENTIFIER`, `create_inherent`, `is_inherent` in implementation | = help: implement the missing item: `type Call = Type;` = help: implement the missing item: `type Error = Type;` = help: implement the missing item: `const INHERENT_IDENTIFIER: [u8; 8] = value;` = help: implement the missing item: `fn create_inherent(_: &InherentData) -> std::option::Option<::Call> { todo!() }` + = help: implement the missing item: `fn is_inherent(_: &::Call) -> bool { todo!() }` diff --git a/frame/support/test/tests/pallet_version.rs b/frame/support/test/tests/pallet_version.rs index b3436b7baed9a..5c33d45aea644 100644 --- a/frame/support/test/tests/pallet_version.rs +++ b/frame/support/test/tests/pallet_version.rs @@ -166,6 +166,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } frame_support::construct_runtime!( diff --git a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs index e0b2dfa0bef8d..4a111fb494e16 100644 --- a/frame/support/test/tests/pallet_with_name_trait_is_valid.rs +++ b/frame/support/test/tests/pallet_with_name_trait_is_valid.rs @@ -46,7 +46,7 @@ frame_support::decl_module! 
{ const Foo: u32 = u32::max_value(); #[weight = 0] - fn accumulate_dummy(origin, increase_by: T::Balance) { + fn accumulate_dummy(_origin, _increase_by: T::Balance) { unimplemented!(); } @@ -81,6 +81,10 @@ impl sp_inherents::ProvideInherent for Module { fn check_inherent(_: &Self::Call, _: &sp_inherents::InherentData) -> std::result::Result<(), Self::Error> { unimplemented!(); } + + fn is_inherent(_call: &Self::Call) -> bool { + unimplemented!(); + } } #[cfg(test)] @@ -141,6 +145,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_test::Config for Runtime { diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index 19858731b3a09..c4d7cf01ae215 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -36,7 +36,7 @@ pub trait Config: 'static + Eq + Clone { frame_support::decl_module! { pub struct Module for enum Call where origin: T::Origin, system=self { #[weight = 0] - fn noop(origin) {} + fn noop(_origin) {} } } diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 3ebee534a64e1..47980a88164e5 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -89,6 +89,7 @@ impl system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl module::Config for Runtime { diff --git a/frame/system/benchmarking/src/mock.rs b/frame/system/benchmarking/src/mock.rs index 23da1fee5617a..253945a598bdc 100644 --- a/frame/system/benchmarking/src/mock.rs +++ b/frame/system/benchmarking/src/mock.rs @@ -61,6 +61,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl crate::Config for Test {} diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 3cb74a7ed918d..cb25c3c027889 
100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -120,6 +120,7 @@ impl SignedExtension for CheckNonce where mod tests { use super::*; use crate::mock::{Test, new_test_ext, CALL}; + use frame_support::{assert_noop, assert_ok}; #[test] fn signed_ext_check_nonce_works() { @@ -134,14 +135,23 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // stale - assert!(CheckNonce::(0).validate(&1, CALL, &info, len).is_err()); - assert!(CheckNonce::(0).pre_dispatch(&1, CALL, &info, len).is_err()); + assert_noop!( + CheckNonce::(0).validate(&1, CALL, &info, len), + InvalidTransaction::Stale + ); + assert_noop!( + CheckNonce::(0).pre_dispatch(&1, CALL, &info, len), + InvalidTransaction::Stale + ); // correct - assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len).is_ok()); + assert_ok!(CheckNonce::(1).validate(&1, CALL, &info, len)); + assert_ok!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len)); // future - assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(5).pre_dispatch(&1, CALL, &info, len).is_err()); + assert_ok!(CheckNonce::(5).validate(&1, CALL, &info, len)); + assert_noop!( + CheckNonce::(5).pre_dispatch(&1, CALL, &info, len), + InvalidTransaction::Future + ); }) } } diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index fc9898b778b8d..e01c91317615d 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -18,7 +18,7 @@ use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Encode, Decode}; use sp_runtime::{ - traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, Printable}, + traits::{SignedExtension, DispatchInfoOf, Dispatchable, PostDispatchInfoOf}, transaction_validity::{ ValidTransaction, TransactionValidityError, InvalidTransaction, 
TransactionValidity, TransactionPriority, @@ -26,7 +26,7 @@ use sp_runtime::{ DispatchResult, }; use frame_support::{ - traits::{Get}, + traits::Get, weights::{PostDispatchInfo, DispatchInfo, DispatchClass, priority::FrameTransactionPriority}, }; @@ -248,9 +248,7 @@ impl SignedExtension for CheckWeight where // to them actually being useful. Block producers are thus not allowed to include mandatory // extrinsics that result in error. if let (DispatchClass::Mandatory, Err(e)) = (info.class, result) { - "Bad mandatory".print(); - e.print(); - + log::error!(target: "runtime::system", "Bad mandatory: {:?}", e); Err(InvalidTransaction::BadMandatory)? } @@ -283,8 +281,7 @@ mod tests { use crate::{BlockWeight, AllExtrinsicsLen}; use crate::mock::{Test, CALL, new_test_ext, System}; use sp_std::marker::PhantomData; - use frame_support::{assert_ok, assert_noop}; - use frame_support::weights::{Weight, Pays}; + use frame_support::{assert_err, assert_ok, weights::{Weight, Pays}}; fn block_weights() -> crate::limits::BlockWeights { ::BlockWeights::get() @@ -337,11 +334,7 @@ mod tests { ..Default::default() }; let len = 0_usize; - - assert_noop!( - CheckWeight::::do_validate(&max, len), - InvalidTransaction::ExhaustsResources - ); + assert_err!(CheckWeight::::do_validate(&max, len), InvalidTransaction::ExhaustsResources); }); } @@ -373,10 +366,7 @@ mod tests { ..Default::default() }) ); - assert_noop!( - CheckWeight::::do_validate(&max, len), - InvalidTransaction::ExhaustsResources - ); + assert_err!(CheckWeight::::do_validate(&max, len), InvalidTransaction::ExhaustsResources); }); } @@ -439,15 +429,13 @@ mod tests { let dispatch_operational = DispatchInfo { weight: 251, class: DispatchClass::Operational, ..Default::default() }; let len = 0_usize; - assert_noop!( - CheckWeight::::do_pre_dispatch(&dispatch_normal, len), + assert_err!( CheckWeight::::do_pre_dispatch(&dispatch_normal, len), InvalidTransaction::ExhaustsResources ); // Thank goodness we can still do an operational 
transaction to possibly save the blockchain. assert_ok!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len)); // Not too much though - assert_noop!( - CheckWeight::::do_pre_dispatch(&dispatch_operational, len), + assert_err!(CheckWeight::::do_pre_dispatch(&dispatch_operational, len), InvalidTransaction::ExhaustsResources ); // Even with full block, validity of single transaction should be correct. @@ -468,15 +456,19 @@ mod tests { current_weight.set(normal_limit, DispatchClass::Normal) }); // will not fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); + assert_err!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), + InvalidTransaction::ExhaustsResources + ); // will fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); + assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); // likewise for length limit. let len = 100_usize; AllExtrinsicsLen::::put(normal_length_limit()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); + assert_err!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len), + InvalidTransaction::ExhaustsResources + ); + assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len)); }) } @@ -577,10 +569,7 @@ mod tests { let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); assert_eq!(BlockWeight::::get().total(), info.weight + 256); - assert!( - CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .is_ok() - ); + assert_ok!( CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(()))); assert_eq!( BlockWeight::::get().total(), post_info.actual_weight.unwrap() + 256, @@ -609,10 +598,7 @@ mod tests { info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, ); - assert!( - CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) - 
.is_ok() - ); + assert_ok!(CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(()))); assert_eq!( BlockWeight::::get().total(), info.weight + 128 + block_weights().get(DispatchClass::Normal).base_extrinsic, @@ -632,8 +618,7 @@ mod tests { System::block_weight().total(), weights.base_block ); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); - assert!(r.is_ok()); + assert_ok!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len)); assert_eq!( System::block_weight().total(), weights.get(DispatchClass::Normal).base_extrinsic + weights.base_block @@ -689,15 +674,14 @@ mod tests { let mandatory2 = DispatchInfo { weight: 6, class: DispatchClass::Mandatory, ..Default::default() }; // when - let result1 = calculate_consumed_weight::<::Call>( - maximum_weight.clone(), all_weight.clone(), &mandatory1 + assert_ok!( + calculate_consumed_weight::<::Call>( + maximum_weight.clone(), all_weight.clone(), &mandatory1 + ) ); - let result2 = calculate_consumed_weight::<::Call>( - maximum_weight, all_weight, &mandatory2 + assert_err!( + calculate_consumed_weight::<::Call>( maximum_weight, all_weight, &mandatory2), + InvalidTransaction::ExhaustsResources ); - - // then - assert!(result2.is_err()); - assert!(result1.is_ok()); } } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index ebf9eb38375bb..bd6ef5eb50944 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -87,14 +87,14 @@ use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use frame_support::{ Parameter, storage, traits::{ - Contains, Get, PalletInfo, OnNewAccount, OnKilledAccount, HandleLifetime, + SortedMembers, Get, PalletInfo, OnNewAccount, OnKilledAccount, HandleLifetime, StoredMap, EnsureOrigin, OriginTrait, Filter, }, weights::{ Weight, RuntimeDbWeight, DispatchInfo, DispatchClass, extract_actual_weight, PerDispatchClass, }, - dispatch::DispatchResultWithPostInfo, + dispatch::{DispatchResultWithPostInfo, DispatchResult}, }; use 
codec::{Encode, Decode, FullCodec, EncodeLike}; @@ -140,6 +140,19 @@ pub type ConsumedWeight = PerDispatchClass; pub use pallet::*; +/// Do something when we should be setting the code. +pub trait SetCode { + /// Set the code to the given blob. + fn set_code(code: Vec) -> DispatchResult; +} + +impl SetCode for () { + fn set_code(code: Vec) -> DispatchResult { + storage::unhashed::put_raw(well_known_keys::CODE, &code); + Ok(()) + } +} + #[frame_support::pallet] pub mod pallet { use crate::{*, pallet_prelude::*, self as frame_system}; @@ -253,6 +266,10 @@ pub mod pallet { /// an identifier of the chain. #[pallet::constant] type SS58Prefix: Get; + + /// What to do if the user wants the code set to something. Just use `()` unless you are in + /// cumulus. + type OnSetCode: SetCode; } #[pallet::pallet] @@ -329,7 +346,7 @@ pub mod pallet { ensure_root(origin)?; Self::can_set_code(&code)?; - storage::unhashed::put_raw(well_known_keys::CODE, &code); + T::OnSetCode::set_code(code)?; Self::deposit_event(Event::CodeUpdated); Ok(().into()) } @@ -348,7 +365,7 @@ pub mod pallet { code: Vec, ) -> DispatchResultWithPostInfo { ensure_root(origin)?; - storage::unhashed::put_raw(well_known_keys::CODE, &code); + T::OnSetCode::set_code(code)?; Self::deposit_event(Event::CodeUpdated); Ok(().into()) } @@ -853,7 +870,7 @@ impl< pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); impl< O: Into, O>> + From>, - Who: Contains, + Who: SortedMembers, AccountId: PartialEq + Clone + Ord + Default, > EnsureOrigin for EnsureSignedBy { type Success = AccountId; @@ -1201,11 +1218,22 @@ impl Pallet { Account::::get(who).consumers } - /// True if the account has some outstanding references. + /// True if the account has some outstanding consumer references. pub fn is_provider_required(who: &T::AccountId) -> bool { Account::::get(who).consumers != 0 } + /// True if the account has no outstanding consumer references or more than one provider. 
+ pub fn can_dec_provider(who: &T::AccountId) -> bool { + let a = Account::::get(who); + a.consumers == 0 || a.providers > 1 + } + + /// True if the account has at least one provider reference. + pub fn can_inc_consumer(who: &T::AccountId) -> bool { + Account::::get(who).providers > 0 + } + /// Deposits an event into this block's event record. pub fn deposit_event(event: impl Into) { Self::deposit_event_indexed(&[], event.into()); diff --git a/frame/system/src/mock.rs b/frame/system/src/mock.rs index 43c7d8d252774..0f53532eb8f6b 100644 --- a/frame/system/src/mock.rs +++ b/frame/system/src/mock.rs @@ -107,6 +107,7 @@ impl Config for Test { type OnKilledAccount = RecordKilled; type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } pub type SysEvent = frame_system::Event; diff --git a/frame/system/src/tests.rs b/frame/system/src/tests.rs index 9f500e5a3b050..7ad4344ae5c28 100644 --- a/frame/system/src/tests.rs +++ b/frame/system/src/tests.rs @@ -19,7 +19,9 @@ use crate::*; use mock::{*, Origin}; use sp_core::H256; use sp_runtime::{DispatchError, DispatchErrorWithPostInfo, traits::{Header, BlakeTwo256}}; -use frame_support::{assert_noop, weights::WithPostDispatchInfo, dispatch::PostDispatchInfo}; +use frame_support::{ + assert_noop, assert_ok, weights::WithPostDispatchInfo, dispatch::PostDispatchInfo +}; #[test] fn origin_works() { @@ -31,7 +33,7 @@ fn origin_works() { #[test] fn stored_map_works() { new_test_ext().execute_with(|| { - assert!(System::insert(&0, 42).is_ok()); + assert_ok!(System::insert(&0, 42)); assert!(!System::is_provider_required(&0)); assert_eq!(Account::::get(0), AccountInfo { @@ -42,17 +44,17 @@ fn stored_map_works() { data: 42, }); - assert!(System::inc_consumers(&0).is_ok()); + assert_ok!(System::inc_consumers(&0)); assert!(System::is_provider_required(&0)); - assert!(System::insert(&0, 69).is_ok()); + assert_ok!(System::insert(&0, 69)); assert!(System::is_provider_required(&0)); System::dec_consumers(&0); 
assert!(!System::is_provider_required(&0)); assert!(KILLED.with(|r| r.borrow().is_empty())); - assert!(System::remove(&0).is_ok()); + assert_ok!(System::remove(&0)); assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); }); } @@ -122,7 +124,7 @@ fn sufficient_cannot_support_consumer() { assert_noop!(System::inc_consumers(&0), IncRefError::NoProviders); assert_eq!(System::inc_providers(&0), IncRefStatus::Existed); - assert!(System::inc_consumers(&0).is_ok()); + assert_ok!(System::inc_consumers(&0)); assert_noop!(System::dec_providers(&0), DecRefError::ConsumerRemaining); }); } @@ -140,7 +142,7 @@ fn provider_required_to_support_consumer() { assert_eq!(System::dec_providers(&0).unwrap(), DecRefStatus::Exists); assert_eq!(System::account_nonce(&0), 1); - assert!(System::inc_consumers(&0).is_ok()); + assert_ok!(System::inc_consumers(&0)); assert_noop!(System::dec_providers(&0), DecRefError::ConsumerRemaining); System::dec_consumers(&0); @@ -516,7 +518,7 @@ fn ensure_one_of_works() { assert_eq!(ensure_root_or_signed(RawOrigin::Root).unwrap(), Either::Left(())); assert_eq!(ensure_root_or_signed(RawOrigin::Signed(0)).unwrap(), Either::Right(0)); - assert!(ensure_root_or_signed(RawOrigin::None).is_err()) + assert!(ensure_root_or_signed(RawOrigin::None).is_err()); } #[test] diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 01aa6ff3cf261..05ea8e40c6627 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -15,7 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io", optional = true } @@ -41,7 +40,6 @@ std = [ "sp-runtime/std", "frame-benchmarking/std", "frame-support/std", - "serde", 
"frame-system/std", "sp-timestamp/std", "log/std", diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index d467551196850..ce6fd09bb7828 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -241,6 +241,10 @@ pub mod pallet { Ok(()) } } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::set(_)) + } } } @@ -254,7 +258,7 @@ impl Pallet { } /// Set the timestamp to something in particular. Only used for tests. - #[cfg(feature = "std")] + #[cfg(any(feature = "runtime-benchmarks", feature = "std"))] pub fn set_timestamp(now: T::Moment) { Now::::put(now); } @@ -352,6 +356,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const MinimumPeriod: u64 = 5; diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index e6a0284d82307..6c304fabb5a25 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use frame_system::RawOrigin; use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; -use sp_runtime::{traits::{Saturating}}; +use sp_runtime::traits::Saturating; use crate::Module as TipsMod; diff --git a/frame/tips/src/lib.rs b/frame/tips/src/lib.rs index 6d85df33f10c9..b31468797ce41 100644 --- a/frame/tips/src/lib.rs +++ b/frame/tips/src/lib.rs @@ -68,7 +68,7 @@ use frame_support::traits::{ use sp_runtime::{ Percent, RuntimeDebug, traits::{ Zero, AccountIdConversion, Hash, BadOrigin }}; -use frame_support::traits::{Contains, ContainsLengthBound, OnUnbalanced, EnsureOrigin}; +use frame_support::traits::{SortedMembers, ContainsLengthBound, OnUnbalanced, EnsureOrigin}; use codec::{Encode, Decode}; use frame_system::{self as system, ensure_signed}; pub use weights::WeightInfo; @@ -86,7 +86,7 @@ pub trait Config: frame_system::Config + pallet_treasury::Config { /// Origin from which tippers must come. 
/// /// `ContainsLengthBound::max_len` must be cost free (i.e. no storage read or heavy operation). - type Tippers: Contains + ContainsLengthBound; + type Tippers: SortedMembers + ContainsLengthBound; /// The period for which a tip remains open after is has achieved threshold tippers. type TipCountdown: Get; @@ -195,7 +195,6 @@ decl_module! { for enum Call where origin: T::Origin { - /// The period for which a tip remains open after is has achieved threshold tippers. const TipCountdown: T::BlockNumber = T::TipCountdown::get(); @@ -445,7 +444,7 @@ impl Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() + T::PalletId::get().into_account() } /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it @@ -550,13 +549,13 @@ impl Module { tips: Vec<(AccountId, Balance)>, } - use frame_support::{Twox64Concat, migration::StorageKeyIterator}; + use frame_support::{Twox64Concat, migration::storage_key_iter}; - for (hash, old_tip) in StorageKeyIterator::< + for (hash, old_tip) in storage_key_iter::< T::Hash, OldOpenTip, T::BlockNumber, T::Hash>, Twox64Concat, - >::new(b"Treasury", b"Tips").drain() + >(b"Treasury", b"Tips").drain() { let (finder, deposit, finders_fee) = match old_tip.finder { diff --git a/frame/tips/src/tests.rs b/frame/tips/src/tests.rs index ef30962fc846f..3b11e105c6d06 100644 --- a/frame/tips/src/tests.rs +++ b/frame/tips/src/tests.rs @@ -22,11 +22,15 @@ use crate as tips; use super::*; use std::cell::RefCell; -use frame_support::{assert_noop, assert_ok, parameter_types, weights::Weight, traits::Contains}; +use frame_support::{ + assert_noop, assert_ok, parameter_types, + weights::Weight, traits::SortedMembers, + PalletId +}; use sp_runtime::Permill; use sp_core::H256; use sp_runtime::{ - Perbill, ModuleId, + Perbill, testing::Header, traits::{BlakeTwo256, 
IdentityLookup, BadOrigin}, }; @@ -76,6 +80,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -93,7 +98,7 @@ thread_local! { static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } pub struct TenToFourteen; -impl Contains for TenToFourteen { +impl SortedMembers for TenToFourteen { fn sorted_members() -> Vec { TEN_TO_FOURTEEN.with(|v| { v.borrow().clone() @@ -120,11 +125,12 @@ parameter_types! { pub const SpendPeriod: u64 = 2; pub const Burn: Permill = Permill::from_percent(50); pub const DataDepositPerByte: u64 = 1; - pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const MaximumReasonLength: u32 = 16384; + pub const MaxApprovals: u32 = 100; } impl pallet_treasury::Config for Test { - type ModuleId = TreasuryModuleId; + type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; @@ -137,6 +143,7 @@ impl pallet_treasury::Config for Test { type BurnDestination = (); // Just gets burned. type WeightInfo = (); type SpendFunds = (); + type MaxApprovals = MaxApprovals; } parameter_types! 
{ pub const TipCountdown: u64 = 1; diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index b2dc2c9859e0b..9ee172931f4e6 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -604,7 +604,7 @@ mod tests { use frame_system as system; use codec::Encode; use frame_support::{ - parameter_types, + assert_noop, assert_ok, parameter_types, weights::{ DispatchClass, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Weight, WeightToFeePolynomial, WeightToFeeCoefficients, WeightToFeeCoefficient, @@ -615,7 +615,8 @@ mod tests { use sp_core::H256; use sp_runtime::{ testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup}, + traits::{BlakeTwo256, IdentityLookup, One}, + transaction_validity::InvalidTransaction, Perbill, }; use std::cell::RefCell; @@ -687,6 +688,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -825,10 +827,9 @@ mod tests { .unwrap(); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - assert!( + assert_ok!( ChargeTransactionPayment:: ::post_dispatch(pre, &info_from_weight(5), &default_post_info(), len, &Ok(())) - .is_ok() ); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); @@ -837,10 +838,9 @@ mod tests { .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - assert!( + assert_ok!( ChargeTransactionPayment:: ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() ); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); }); @@ -863,10 +863,9 @@ mod tests { // 5 base fee, 10 byte fee, 3/2 * 100 weight fee, 5 tip assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 150 - 5); - assert!( + assert_ok!( ChargeTransactionPayment:: ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() ); // 75 (3/2 of the returned 50 units of weight) is refunded 
assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 75 - 5); @@ -882,10 +881,9 @@ mod tests { .execute_with(|| { // maximum weight possible - assert!( + assert_ok!( ChargeTransactionPayment::::from(0) .pre_dispatch(&1, CALL, &info_from_weight(Weight::max_value()), 10) - .is_ok() ); // fee will be proportional to what is the actual maximum weight in the runtime. assert_eq!( @@ -914,10 +912,9 @@ mod tests { class: DispatchClass::Operational, pays_fee: Pays::No, }; - assert!( + assert_ok!( ChargeTransactionPayment::::from(0) .validate(&1, CALL, &operational_transaction , len) - .is_ok() ); // like a InsecureFreeNormal @@ -926,10 +923,10 @@ mod tests { class: DispatchClass::Normal, pays_fee: Pays::Yes, }; - assert!( + assert_noop!( ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &free_transaction , len) - .is_err() + .validate(&1, CALL, &free_transaction , len), + TransactionValidityError::Invalid(InvalidTransaction::Payment), ); }); } @@ -946,10 +943,9 @@ mod tests { NextFeeMultiplier::put(Multiplier::saturating_from_rational(3, 2)); let len = 10; - assert!( + assert_ok!( ChargeTransactionPayment::::from(10) // tipped .pre_dispatch(&1, CALL, &info_from_weight(3), len) - .is_ok() ); assert_eq!( Balances::free_balance(1), @@ -1145,13 +1141,12 @@ mod tests { assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); // kill the account between pre and post dispatch - assert!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2)).is_ok()); + assert_ok!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2))); assert_eq!(Balances::free_balance(2), 0); - assert!( + assert_ok!( ChargeTransactionPayment:: ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() ); assert_eq!(Balances::free_balance(2), 0); // Transfer Event @@ -1179,10 +1174,9 @@ mod tests { .unwrap(); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - assert!( + assert_ok!( ChargeTransactionPayment:: ::post_dispatch(pre, 
&info_from_weight(100), &post_info_from_weight(101), len, &Ok(())) - .is_ok() ); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); }); @@ -1209,10 +1203,9 @@ mod tests { .pre_dispatch(&user, CALL, &dispatch_info, len) .unwrap(); assert_eq!(Balances::total_balance(&user), 0); - assert!( + assert_ok!( ChargeTransactionPayment:: ::post_dispatch(pre, &dispatch_info, &default_post_info(), len, &Ok(())) - .is_ok() ); assert_eq!(Balances::total_balance(&user), 0); // No events for such a scenario diff --git a/frame/transaction-payment/src/payment.rs b/frame/transaction-payment/src/payment.rs index f84b19d78c297..7292ef4dfee7e 100644 --- a/frame/transaction-payment/src/payment.rs +++ b/frame/transaction-payment/src/payment.rs @@ -117,6 +117,7 @@ where // merge the imbalance caused by paying the fees and refunding parts of it again. let adjusted_paid = paid .offset(refund_imbalance) + .same() .map_err(|_| TransactionValidityError::Invalid(InvalidTransaction::Payment))?; // Call someone else to handle the imbalance (fee and tip separately) let imbalances = adjusted_paid.split(tip); diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 119516fe2741a..64ecbebe0bff9 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -55,7 +55,7 @@ fn create_approved_proposals, I: Instance>(n: u32) -> Result<(), &' let proposal_id = >::get() - 1; Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; } - ensure!(>::get().len() == n as usize, "Not all approved"); + ensure!(>::get().len() == n as usize, "Not all approved"); Ok(()) } @@ -85,6 +85,8 @@ benchmarks_instance! { }: _(RawOrigin::Root, proposal_id) approve_proposal { + let p in 0 .. T::MaxApprovals::get() - 1; + create_approved_proposals::(p)?; let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); Treasury::::propose_spend( RawOrigin::Signed(caller).into(), @@ -95,7 +97,7 @@ benchmarks_instance! 
{ }: _(RawOrigin::Root, proposal_id) on_initialize_proposals { - let p in 0 .. 100; + let p in 0 .. T::MaxApprovals::get(); setup_pot_account::(); create_approved_proposals::(p)?; }: { diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index cef50706b5173..473a570a87256 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -63,21 +63,25 @@ mod benchmarking; pub mod weights; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error}; +use frame_support::{ + decl_module, decl_storage, decl_event, ensure, print, decl_error, + PalletId, BoundedVec, bounded_vec::TryAppendValue, +}; use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::{KeepAlive}, - ReservableCurrency, WithdrawReasons + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::KeepAlive, + ReservableCurrency, WithdrawReasons, +}; +use sp_runtime::{ + Permill, RuntimeDebug, + traits::{ + Zero, StaticLookup, AccountIdConversion, Saturating + } }; -use sp_runtime::{Permill, ModuleId, RuntimeDebug, traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating -}}; use frame_support::weights::{Weight, DispatchClass}; -use frame_support::traits::{EnsureOrigin}; +use frame_support::traits::EnsureOrigin; use codec::{Encode, Decode}; -use frame_system::{ensure_signed}; +use frame_system::ensure_signed; pub use weights::WeightInfo; pub type BalanceOf = @@ -89,7 +93,7 @@ pub type NegativeImbalanceOf = pub trait Config: frame_system::Config { /// The treasury's module id, used for deriving its sovereign account ID. - type ModuleId: Get; + type PalletId: Get; /// The staking balance. type Currency: Currency + ReservableCurrency; @@ -127,6 +131,9 @@ pub trait Config: frame_system::Config { /// Runtime hooks to external pallet using treasury to compute spend funds. 
type SpendFunds: SpendFunds; + + /// The maximum number of approvals that can wait in the spending queue. + type MaxApprovals: Get; } /// A trait to allow the Treasury Pallet to spend it's funds for other purposes. @@ -155,7 +162,7 @@ pub trait SpendFunds, I=DefaultInstance> { pub type ProposalIndex = u32; /// A spending proposal. -#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Proposal { /// The account proposing it. @@ -179,7 +186,7 @@ decl_storage! { => Option>>; /// Proposal indices that have been approved but not yet awarded. - pub Approvals get(fn approvals): Vec; + pub Approvals get(fn approvals): BoundedVec; } add_extra_genesis { build(|_config| { @@ -187,10 +194,7 @@ decl_storage! { let account_id = >::account_id(); let min = T::Currency::minimum_balance(); if T::Currency::free_balance(&account_id) < min { - let _ = T::Currency::make_free_balance_be( - &account_id, - min, - ); + let _ = T::Currency::make_free_balance_be(&account_id, min); } }); } @@ -227,6 +231,8 @@ decl_error! { InsufficientProposersBalance, /// No proposal or bounty at that index. InvalidIndex, + /// Too many approvals in the queue. + TooManyApprovals, } } @@ -249,7 +255,7 @@ decl_module! { const Burn: Permill = T::Burn::get(); /// The treasury's module id, used for deriving its sovereign account ID. - const ModuleId: ModuleId = T::ModuleId::get(); + const PalletId: PalletId = T::PalletId::get(); type Error = Error; @@ -315,12 +321,12 @@ decl_module! 
{ /// - DbReads: `Proposals`, `Approvals` /// - DbWrite: `Approvals` /// # - #[weight = (T::WeightInfo::approve_proposal(), DispatchClass::Operational)] + #[weight = (T::WeightInfo::approve_proposal(T::MaxApprovals::get()), DispatchClass::Operational)] pub fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { T::ApproveOrigin::ensure_origin(origin)?; ensure!(>::contains_key(proposal_id), Error::::InvalidIndex); - Approvals::::append(proposal_id); + Approvals::::try_append(proposal_id).map_err(|_| Error::::TooManyApprovals)?; } /// # @@ -349,7 +355,7 @@ impl, I: Instance> Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() + T::PalletId::get().into_account() } /// The needed bond for a proposal whose spend is `value`. @@ -367,7 +373,7 @@ impl, I: Instance> Module { let mut missed_any = false; let mut imbalance = >::zero(); - let proposals_len = Approvals::::mutate(|v| { + let proposals_len = Approvals::::mutate(|v| { let proposals_approvals_len = v.len() as u32; v.retain(|&index| { // Should always be true, but shouldn't panic if false or we're screwed. diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 45fc3e629fb0b..cb6d4903a5732 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -24,12 +24,11 @@ use super::*; use std::cell::RefCell; use frame_support::{ assert_noop, assert_ok, parameter_types, - traits::OnInitialize, + traits::OnInitialize, PalletId }; use sp_core::H256; use sp_runtime::{ - ModuleId, testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; @@ -77,6 +76,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -98,13 +98,14 @@ parameter_types! 
{ pub const ProposalBondMinimum: u64 = 1; pub const SpendPeriod: u64 = 2; pub const Burn: Permill = Permill::from_percent(50); - pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); pub const BountyUpdatePeriod: u32 = 20; pub const BountyCuratorDeposit: Permill = Permill::from_percent(50); pub const BountyValueMinimum: u64 = 1; + pub const MaxApprovals: u32 = 100; } impl Config for Test { - type ModuleId = TreasuryModuleId; + type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; @@ -117,6 +118,7 @@ impl Config for Test { type BurnDestination = (); // Just gets burned. type WeightInfo = (); type SpendFunds = (); + type MaxApprovals = MaxApprovals; } pub fn new_test_ext() -> sp_io::TestExternalities { @@ -359,3 +361,20 @@ fn genesis_funding_works() { assert_eq!(Treasury::pot(), initial_funding - Balances::minimum_balance()); }); } + +#[test] +fn max_approvals_limited() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), u64::max_value()); + Balances::make_free_balance_be(&0, u64::max_value()); + + for _ in 0 .. MaxApprovals::get() { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::root(), 0)); + } + + // One too many will fail + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_noop!(Treasury::approve_proposal(Origin::root(), 0), Error::::TooManyApprovals); + }); +} diff --git a/frame/treasury/src/weights.rs b/frame/treasury/src/weights.rs index b8a5625bf0624..9d627f1c287e2 100644 --- a/frame/treasury/src/weights.rs +++ b/frame/treasury/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
// SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,12 +17,12 @@ //! Autogenerated weights for pallet_treasury //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-12-16, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-26, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: -// ./target/release/substrate +// target/release/substrate // benchmark // --chain=dev // --steps=50 @@ -46,7 +46,7 @@ use sp_std::marker::PhantomData; pub trait WeightInfo { fn propose_spend() -> Weight; fn reject_proposal() -> Weight; - fn approve_proposal() -> Weight; + fn approve_proposal(p: u32, ) -> Weight; fn on_initialize_proposals(p: u32, ) -> Weight; } @@ -54,24 +54,26 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn propose_spend() -> Weight { - (59_986_000 as Weight) + (45_393_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn reject_proposal() -> Weight { - (48_300_000 as Weight) + (42_796_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - fn approve_proposal() -> Weight { - (14_054_000 as Weight) + fn approve_proposal(p: u32, ) -> Weight { + (14_153_000 as Weight) + // Standard Error: 1_000 + .saturating_add((94_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn on_initialize_proposals(p: u32, ) -> Weight { - (86_038_000 as Weight) - // Standard Error: 18_000 - .saturating_add((78_781_000 as Weight).saturating_mul(p as 
Weight)) + (51_633_000 as Weight) + // Standard Error: 42_000 + .saturating_add((65_705_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -82,24 +84,26 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn propose_spend() -> Weight { - (59_986_000 as Weight) + (45_393_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn reject_proposal() -> Weight { - (48_300_000 as Weight) + (42_796_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - fn approve_proposal() -> Weight { - (14_054_000 as Weight) + fn approve_proposal(p: u32, ) -> Weight { + (14_153_000 as Weight) + // Standard Error: 1_000 + .saturating_add((94_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn on_initialize_proposals(p: u32, ) -> Weight { - (86_038_000 as Weight) - // Standard Error: 18_000 - .saturating_add((78_781_000 as Weight).saturating_mul(p as Weight)) + (51_633_000 as Weight) + // Standard Error: 42_000 + .saturating_add((65_705_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(p as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index f55cff4d653c5..1eb92df4ecaa6 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } 
codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -31,7 +30,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-runtime/std", "frame-support/std", diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 3a8089519fac5..f13e1b6ef7789 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -111,6 +111,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index e1335237eb508..25890fea038de 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -32,7 +31,6 @@ hex-literal = "0.3.1" [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index c02e9dc78c13e..e5e6cb5069b82 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -464,6 +464,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const MaxLocks: u32 = 10; diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 2be8545a81d1d..642da2c465e66 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -282,16 +282,14 @@ fn generate_runtime_api_base_structures() -> Result { self.recorder = Some(Default::default()); } + fn proof_recorder(&self) -> Option<#crate_::ProofRecorder> { + self.recorder.clone() + } + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { self.recorder .take() - .map(|recorder| { - let trie_nodes = recorder.read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - #crate_::StorageProof::new(trie_nodes) - }) + .map(|recorder| recorder.to_storage_proof()) } fn into_storage_changes( diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 62a03a59baacd..383cd4f635ea2 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -102,6 +102,10 @@ fn implement_common_api_traits( unimplemented!("`extract_proof` not implemented for runtime api mocks") } + fn proof_recorder(&self) -> Option<#crate_::ProofRecorder<#block_type>> { + unimplemented!("`proof_recorder` not implemented for runtime api mocks") + } + fn into_storage_changes( &self, _: &Self::StateBackend, diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 2e4ccf8ff4edf..aa3c69d46a29d 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -195,7 +195,7 @@ pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { ImplItem::Method(method) => Some(&method.sig), _ => None, }) - .map(|sig| { + .flat_map(|sig| { let ret_ty = match &sig.output { ReturnType::Default => None, ReturnType::Type(_, ty) => 
Some((**ty).clone()), @@ -209,7 +209,6 @@ pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { _ => (**ty).clone(), }).chain(ret_ty) }) - .flatten() .collect() } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index afb9af343ba6c..155bb899a2ed5 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -362,7 +362,7 @@ pub use sp_api_proc_macro::mock_impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub type ProofRecorder = sp_state_machine::ProofRecorder>; +pub type ProofRecorder = sp_state_machine::ProofRecorder<::Hash>; /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] @@ -471,6 +471,9 @@ pub trait ApiExt { /// If `record_proof` was not called before, this will return `None`. fn extract_proof(&mut self) -> Option; + /// Returns the current active proof recorder. + fn proof_recorder(&self) -> Option>; + /// Convert the api object into the storage changes that were done while executing runtime /// api functions. 
/// diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 76751cdee81bd..3c3b5a35c164a 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -17,6 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" +static_assertions = "1.1.0" num-traits = { version = "0.2.8", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index 9813277506c40..906c4d0cfd316 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -33,6 +33,10 @@ const SHIFT: usize = 32; /// short form of _Base_. Analogous to the value 10 in base-10 decimal numbers. const B: Double = Single::max_value() as Double + 1; +static_assertions::const_assert!( + sp_std::mem::size_of::() - sp_std::mem::size_of::() == SHIFT / 8 +); + /// Splits a [`Double`] limb number into a tuple of two [`Single`] limb numbers. pub fn split(a: Double) -> (Single, Single) { let al = a as Single; @@ -187,6 +191,7 @@ impl BigUint { let u = Double::from(self.checked_get(j).unwrap_or(0)); let v = Double::from(other.checked_get(j).unwrap_or(0)); let s = u + v + k; + // proof: any number % B will fit into `Single`. 
w.set(j, (s % B) as Single); k = s / B; } @@ -209,28 +214,24 @@ impl BigUint { let s = { let u = Double::from(self.checked_get(j).unwrap_or(0)); let v = Double::from(other.checked_get(j).unwrap_or(0)); - let mut needs_borrow = false; - let mut t = 0; - if let Some(v1) = u.checked_sub(v) { - if let Some(v2) = v1.checked_sub(k) { - t = v2; - k = 0; - } else { - needs_borrow = true; - } + if let Some(v2) = u.checked_sub(v).and_then(|v1| v1.checked_sub(k)) { + // no borrow is needed. u - v - k can be computed as-is + let t = v2; + k = 0; + + t } else { - needs_borrow = true; - } - if needs_borrow { - t = u + B - v - k; + // borrow is needed. Add a `B` to u, before subtracting. + // PROOF: addition: `u + B < 2*B`, thus can fit in double. + // PROOF: subtraction: if `u - v - k < 0`, then `u + B - v - k < B`. + // NOTE: the order of operations is critical to ensure underflow won't happen. + let t = u + B - v - k; k = 1; + + t } - t }; - // PROOF: t either comes from `v2`, or from `u + B - v - k`. The former is - // trivial. The latter will not overflow this branch will only happen if the sum of - // `u - v - k` part has been negative, hence `u + B - v - k < B`. w.set(j, s as Single); } @@ -264,10 +265,9 @@ impl BigUint { let mut k = 0; for i in 0..m { // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. - let t = - mul_single(self.get(j), other.get(i)) - + Double::from(w.get(i + j)) - + Double::from(k); + let t = mul_single(self.get(j), other.get(i)) + + Double::from(w.get(i + j)) + + Double::from(k); w.set(i + j, (t % B) as Single); // PROOF: (B^2 - 1) / B < B. conversion is safe. 
k = (t / B) as Single; @@ -580,12 +580,6 @@ pub mod tests { BigUint { digits: vec![1; n] } } - #[test] - fn shift_check() { - let shift = sp_std::mem::size_of::() - sp_std::mem::size_of::(); - assert_eq!(shift * 8, SHIFT); - } - #[test] fn split_works() { let a = SHIFT / 2; @@ -732,12 +726,14 @@ pub mod tests { let c = BigUint { digits: vec![1, 1, 2] }; let d = BigUint { digits: vec![0, 2] }; let e = BigUint { digits: vec![0, 1, 1, 2] }; + let f = BigUint { digits: vec![7, 8] }; assert!(a.clone().div(&b, true).is_none()); assert!(c.clone().div(&a, true).is_none()); assert!(c.clone().div(&d, true).is_none()); assert!(e.clone().div(&a, true).is_none()); + assert!(f.clone().div(&b, true).is_none()); assert!(c.clone().div(&b, true).is_some()); } diff --git a/primitives/arithmetic/src/fixed_point.rs b/primitives/arithmetic/src/fixed_point.rs index b837c360c7c54..ec2c28f35f1ca 100644 --- a/primitives/arithmetic/src/fixed_point.rs +++ b/primitives/arithmetic/src/fixed_point.rs @@ -57,7 +57,7 @@ pub trait FixedPointNumber: + Saturating + Bounded + Eq + PartialEq + Ord + PartialOrd + CheckedSub + CheckedAdd + CheckedMul + CheckedDiv - + Add + Sub + Div + Mul + + Add + Sub + Div + Mul + Zero + One { /// The underlying data type used for this fixed point number. type Inner: Debug + One + CheckedMul + CheckedDiv + FixedPointOperand; @@ -195,21 +195,6 @@ pub trait FixedPointNumber: Self::one().checked_div(&self) } - /// Returns zero. - fn zero() -> Self { - Self::from_inner(Self::Inner::zero()) - } - - /// Checks if the number is zero. - fn is_zero(&self) -> bool { - self.into_inner() == Self::Inner::zero() - } - - /// Returns one. - fn one() -> Self { - Self::from_inner(Self::DIV) - } - /// Checks if the number is one. fn is_one(&self) -> bool { self.into_inner() == Self::Inner::one() @@ -381,7 +366,7 @@ macro_rules! 
implement_fixed { } #[cfg(any(feature = "std", test))] - pub fn to_fraction(self) -> f64 { + pub fn to_float(self) -> f64 { self.0 as f64 / ::DIV as f64 } } @@ -514,6 +499,22 @@ macro_rules! implement_fixed { } } + impl Zero for $name { + fn zero() -> Self { + Self::from_inner(::Inner::zero()) + } + + fn is_zero(&self) -> bool { + self.into_inner() == ::Inner::zero() + } + } + + impl One for $name { + fn one() -> Self { + Self::from_inner(Self::DIV) + } + } + impl sp_std::fmt::Debug for $name { #[cfg(feature = "std")] fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 3d71cf63f55d1..038a28ddab351 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.21" -libp2p = { version = "0.36.0", default-features = false } +libp2p = { version = "0.37.1", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "3.0.0"} sp-inherents = { version = "3.0.0", path = "../../inherents" } @@ -34,6 +34,7 @@ parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} wasm-timer = "0.2.5" +async-trait = "0.1.42" [dev-dependencies] futures = "0.3.9" diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 9b7995a2b00bd..6e4fb98865015 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -146,7 +146,7 @@ pub struct BlockImportParams { /// Intermediate values that are interpreted by block importers. Each block importer, /// upon handling a value, removes it from the intermediate list. 
The final block importer /// rejects block import if there are still intermediate values that remain unhandled. - pub intermediates: HashMap, Box>, + pub intermediates: HashMap, Box>, /// Auxiliary consensus data produced by the block. /// Contains a list of key-value pairs. If values are `None`, the keys /// will be deleted. @@ -237,13 +237,10 @@ impl BlockImportParams { pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; - match v.downcast::() { - Ok(v) => Ok(v), - Err(v) => { + v.downcast::().or_else(|v| { self.intermediates.insert(k, v); Err(Error::InvalidIntermediate) - }, - } + }) } /// Get a reference to a given intermediate. @@ -264,14 +261,15 @@ impl BlockImportParams { } /// Block import trait. +#[async_trait::async_trait] pub trait BlockImport { /// The error type. type Error: std::error::Error + Send + 'static; /// The transaction type used by the backend. - type Transaction; + type Transaction: Send + 'static; /// Check block preconditions. - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result; @@ -279,56 +277,64 @@ pub trait BlockImport { /// Import a block. /// /// Cached data can be accessed through the blockchain cache. - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result; } -impl BlockImport for crate::import_queue::BoxBlockImport { +#[async_trait::async_trait] +impl BlockImport for crate::import_queue::BoxBlockImport + where + Transaction: Send + 'static, +{ type Error = crate::error::Error; type Transaction = Transaction; /// Check block preconditions. - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (**self).check_block(block) + (**self).check_block(block).await } /// Import a block. /// /// Cached data can be accessed through the blockchain cache. 
- fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result { - (**self).import_block(block, cache) + (**self).import_block(block, cache).await } } +#[async_trait::async_trait] impl BlockImport for Arc - where for<'r> &'r T: BlockImport + where + for<'r> &'r T: BlockImport, + T: Send + Sync, + Transaction: Send + 'static, { type Error = E; type Transaction = Transaction; - fn check_block( + async fn check_block( &mut self, block: BlockCheckParams, ) -> Result { - (&**self).check_block(block) + (&**self).check_block(block).await } - fn import_block( + async fn import_block( &mut self, block: BlockImportParams, cache: HashMap>, ) -> Result { - (&**self).import_block(block, cache) + (&**self).import_block(block, cache).await } } diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs index be930fa4a0016..c18c8b127f991 100644 --- a/primitives/consensus/common/src/evaluation.rs +++ b/primitives/consensus/common/src/evaluation.rs @@ -39,9 +39,6 @@ pub enum Error { /// Proposal had wrong number. #[error("Proposal had wrong number. Expected {expected:?}, got {got:?}")] WrongNumber { expected: BlockNumber, got: BlockNumber }, - /// Proposal exceeded the maximum size. 
- #[error("Proposal size {block_size} exceeds maximum allowed size of {max_block_size}.")] - ProposalTooLarge { block_size: usize, max_block_size: usize }, } /// Attempt to evaluate a substrate block as a node block, returning error @@ -50,17 +47,12 @@ pub fn evaluate_initial( proposal: &Block, parent_hash: &::Hash, parent_number: <::Header as HeaderT>::Number, - max_block_size: usize, ) -> Result<()> { let encoded = Encode::encode(proposal); let proposal = Block::decode(&mut &encoded[..]) .map_err(|e| Error::BadProposalFormat(e))?; - if encoded.len() > max_block_size { - return Err(Error::ProposalTooLarge { max_block_size, block_size: encoded.len() }) - } - if *parent_hash != *proposal.header().parent_hash() { return Err(Error::WrongParentHash { expected: format!("{:?}", *parent_hash), diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index b6067645a8920..4220c7b14162d 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -82,11 +82,12 @@ pub struct IncomingBlock { pub type CacheKeyId = [u8; 4]; /// Verify a justification of a block +#[async_trait::async_trait] pub trait Verifier: Send + Sync { /// Verify the given data and return the BlockImportParams and an optional /// new set of validators to import. If not, err with an Error-Message /// presented to the User in the logs. - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: B::Header, @@ -163,18 +164,18 @@ pub enum BlockImportError { } /// Single block import function. 
-pub fn import_single_block, Transaction>( - import_handle: &mut dyn BlockImport, +pub async fn import_single_block, Transaction: Send + 'static>( + import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, ) -> Result>, BlockImportError> { - import_single_block_metered(import_handle, block_origin, block, verifier, None) + import_single_block_metered(import_handle, block_origin, block, verifier, None).await } /// Single block import function with metering. -pub(crate) fn import_single_block_metered, Transaction>( - import_handle: &mut dyn BlockImport, +pub(crate) async fn import_single_block_metered, Transaction: Send + 'static>( + import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, @@ -232,24 +233,28 @@ pub(crate) fn import_single_block_metered, Transaction parent_hash, allow_missing_state: block.allow_missing_state, import_existing: block.import_existing, - }))? { + }).await)? { BlockImportResult::ImportedUnknown { .. } => (), r => return Ok(r), // Any other successful result means that the block is already imported. 
} let started = wasm_timer::Instant::now(); - let (mut import_block, maybe_keys) = verifier.verify(block_origin, header, justifications, block.body) - .map_err(|msg| { - if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); - } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); - } - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(false, started.elapsed()); - } - BlockImportError::VerificationFailed(peer.clone(), msg) - })?; + let (mut import_block, maybe_keys) = verifier.verify( + block_origin, + header, + justifications, + block.body + ).await.map_err(|msg| { + if let Some(ref peer) = peer { + trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + } else { + trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + } + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(false, started.elapsed()); + } + BlockImportError::VerificationFailed(peer.clone(), msg) + })?; if let Some(metrics) = metrics.as_ref() { metrics.report_verification(true, started.elapsed()); @@ -261,7 +266,7 @@ pub(crate) fn import_single_block_metered, Transaction } import_block.allow_missing_state = block.allow_missing_state; - let imported = import_handle.import_block(import_block.convert_transaction(), cache); + let imported = import_handle.import_block(import_block.convert_transaction(), cache).await; if let Some(metrics) = metrics.as_ref() { metrics.report_verification_and_import(started.elapsed()); } diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index eb2b4b1fa7fcd..7998ba1b3ec76 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -155,7 +155,7 @@ mod worker_messages { /// to be run. 
/// /// Returns when `block_import` ended. -async fn block_import_process( +async fn block_import_process( mut block_import: BoxBlockImport, mut verifier: impl Verifier, mut result_sender: BufferedLinkSender, @@ -195,7 +195,7 @@ struct BlockImportWorker { } impl BlockImportWorker { - fn new, Transaction: Send>( + fn new, Transaction: Send + 'static>( result_sender: BufferedLinkSender, verifier: V, block_import: BoxBlockImport, @@ -322,7 +322,7 @@ struct ImportManyBlocksResult { /// Import several blocks at once, returning import result for each block. /// /// This will yield after each imported block once, to ensure that other futures can be called as well. -async fn import_many_blocks, Transaction>( +async fn import_many_blocks, Transaction: Send + 'static>( import_handle: &mut BoxBlockImport, blocks_origin: BlockOrigin, blocks: Vec>, @@ -371,7 +371,7 @@ async fn import_many_blocks, Transaction>( block, verifier, metrics.clone(), - ) + ).await }; if let Some(metrics) = metrics.as_ref() { @@ -439,8 +439,9 @@ mod tests { use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; use std::collections::HashMap; + #[async_trait::async_trait] impl Verifier for () { - fn verify( + async fn verify( &mut self, origin: BlockOrigin, header: Header, @@ -451,18 +452,19 @@ mod tests { } } + #[async_trait::async_trait] impl BlockImport for () { type Error = crate::Error; type Transaction = Extrinsic; - fn check_block( + async fn check_block( &mut self, _block: BlockCheckParams, ) -> Result { Ok(ImportResult::imported(false)) } - fn import_block( + async fn import_block( &mut self, _block: BlockImportParams, _cache: HashMap>, diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 27a43dbe02208..642b6b12e7d6f 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -196,6 +196,13 @@ pub trait Proposer { /// a maximum duration for building this proposal is given. 
If building the proposal takes /// longer than this maximum, the proposal will be very likely discarded. /// + /// If `block_size_limit` is given, the proposer should push transactions until the block size + /// limit is hit. Depending on the `finalize_block` implementation of the runtime, it probably + /// incorporates other operations (that are happening after the block limit is hit). So, + /// when the block size estimation also includes a proof that is recorded alongside the block + /// production, the proof can still grow. This means that the `block_size_limit` should not be + /// the hard limit of what is actually allowed. + /// /// # Return /// /// Returns a future that resolves to a [`Proposal`] or to [`Error`]. @@ -204,6 +211,7 @@ pub trait Proposer { inherent_data: InherentData, inherent_digests: DigestFor, max_duration: Duration, + block_size_limit: Option, ) -> Self::Proposal; } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 95192acc4cb17..3479fc28c6358 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -586,8 +586,11 @@ ss58_address_format!( (65, "aventus", "Aventus Chain mainnet, standard account (*25519).") CrustAccount => (66, "crust", "Crust Network, standard account (*25519).") + SoraAccount => + (69, "sora", "SORA Network, standard account (*25519).") + SocialAccount => + (252, "social-network", "Social Network, standard account (*25519).") // Note: 16384 and above are reserved. - ); /// Set the default "version" (actually, this is a bit of a misnomer and the version byte is diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index c72f38ea0827e..8f97d59f21942 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -148,6 +148,12 @@ impl Deref for Bytes { fn deref(&self) -> &[u8] { &self.0[..] 
} } +impl codec::WrapperTypeEncode for Bytes {} + +impl codec::WrapperTypeDecode for Bytes { + type Wrapped = Vec; +} + #[cfg(feature = "std")] impl sp_std::str::FromStr for Bytes { type Err = bytes::FromHexError; @@ -275,7 +281,7 @@ pub trait TypeId { /// A log level matching the one from `log` crate. /// -/// Used internally by `sp_io::log` method. +/// Used internally by `sp_io::logging::log` method. #[derive(Encode, Decode, PassByEnum, Copy, Clone)] pub enum LogLevel { /// `Error` log level. @@ -328,6 +334,53 @@ impl From for log::Level { } } +/// Log level filter that expresses which log levels should be filtered. +/// +/// This enum matches the [`log::LogLevelFilter`] enum. +#[derive(Encode, Decode, PassByEnum, Copy, Clone)] +pub enum LogLevelFilter { + /// `Off` log level filter. + Off = 0, + /// `Error` log level filter. + Error = 1, + /// `Warn` log level filter. + Warn = 2, + /// `Info` log level filter. + Info = 3, + /// `Debug` log level filter. + Debug = 4, + /// `Trace` log level filter. + Trace = 5, +} + +impl From for log::LevelFilter { + fn from(l: LogLevelFilter) -> Self { + use self::LogLevelFilter::*; + match l { + Off => Self::Off, + Error => Self::Error, + Warn => Self::Warn, + Info => Self::Info, + Debug => Self::Debug, + Trace => Self::Trace, + } + } +} + +impl From for LogLevelFilter { + fn from(l: log::LevelFilter) -> Self { + use log::LevelFilter::*; + match l { + Off => Self::Off, + Error => Self::Error, + Warn => Self::Warn, + Info => Self::Info, + Debug => Self::Debug, + Trace => Self::Trace, + } + } +} + /// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`. 
/// /// When Substrate calls into Wasm it expects a fixed signature for functions exported diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 1077f41048d59..ce5a0990d738d 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -281,6 +281,16 @@ pub trait Externalities: ExtensionStore { /// /// Adds new storage keys to the DB tracking whitelist. fn set_whitelist(&mut self, new: Vec); + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Returns estimated proof size for the state queries so far. + /// Proof is reset on commit and wipe. + fn proof_size(&self) -> Option { + None + } } /// Extension for the [`Externalities`] trait. @@ -302,7 +312,7 @@ pub trait ExternalitiesExt { impl ExternalitiesExt for &mut dyn Externalities { fn extension(&mut self) -> Option<&mut T> { - self.extension_by_type_id(TypeId::of::()).and_then(Any::downcast_mut) + self.extension_by_type_id(TypeId::of::()).and_then(::downcast_mut) } fn register_extension(&mut self, ext: T) -> Result<(), Error> { diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index 0110db5680a18..facc620810468 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -423,8 +423,10 @@ pub trait ProvideInherent { /// /// - `Err(_)` indicates that this function failed and further operations should be aborted. /// - /// CAUTION: This check has a bug when used in pallets that also provide unsigned transactions. - /// See for details. + /// NOTE: If inherent is required then the runtime asserts that the block contains at least + /// one inherent for which: + /// * type is [`Self::Call`], + /// * [`Self::is_inherent`] returns true. 
fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(None) } /// Check whether the given inherent is valid. Checking the inherent is optional and can be @@ -433,9 +435,24 @@ pub trait ProvideInherent { /// When checking an inherent, the first parameter represents the inherent that is actually /// included in the block by its author. Whereas the second parameter represents the inherent /// data that the verifying node calculates. + /// + /// NOTE: A block can contain multiple inherents. fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { Ok(()) } + + /// Return whether the call is an inherent call. + /// + /// NOTE: Signed extrinsics are not inherents, but a signed extrinsic with the given call variant + /// can be dispatched. + /// + /// # Warning + /// + /// In FRAME, inherents are enforced to be before other extrinsics; for this reason, + /// pallets with unsigned transactions **must ensure** that no unsigned transaction call + /// is an inherent call, when implementing `ValidateUnsigned::validate_unsigned`. + /// Otherwise a block producer can produce invalid blocks by including them after non-inherent extrinsics. + fn is_inherent(call: &Self::Call) -> bool; } #[cfg(test)] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 7a1313490db9f..b03d9da3fd2a9 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -47,7 +47,7 @@ use sp_core::{ use sp_keystore::{KeystoreExt, SyncCryptoStore}; use sp_core::{ - OpaquePeerId, crypto::KeyTypeId, ed25519, sr25519, ecdsa, H256, LogLevel, + OpaquePeerId, crypto::KeyTypeId, ed25519, sr25519, ecdsa, H256, LogLevel, LogLevelFilter, offchain::{ Timestamp, HttpRequestId, HttpRequestStatus, HttpError, StorageKind, OpaqueNetworkState, }, @@ -1082,6 +1082,11 @@ pub trait Logging { ) } } + + /// Returns the max log level used by the host. 
+ fn max_level() -> LogLevelFilter { + log::max_level().into() + } } #[derive(Encode, Decode)] diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml new file mode 100644 index 0000000000000..e647606f1595a --- /dev/null +++ b/primitives/maybe-compressed-blob/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "sp-maybe-compressed-blob" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Handling of blobs, usually Wasm code, which may be compressed" +documentation = "https://docs.rs/sp-maybe-compressed-blob" +readme = "README.md" + +[target.'cfg(not(target_os = "unknown"))'.dependencies] +zstd = { version = "0.6.0", default-features = false } + +[target.'cfg(target_os = "unknown")'.dependencies] +ruzstd = { version = "0.2.2" } diff --git a/primitives/maybe-compressed-blob/README.md b/primitives/maybe-compressed-blob/README.md new file mode 100644 index 0000000000000..b5bb869c30e4f --- /dev/null +++ b/primitives/maybe-compressed-blob/README.md @@ -0,0 +1,3 @@ +Handling of blobs, typically validation code, which may be compressed. + +License: Apache-2.0 diff --git a/primitives/maybe-compressed-blob/src/lib.rs b/primitives/maybe-compressed-blob/src/lib.rs new file mode 100644 index 0000000000000..acd283e747f9f --- /dev/null +++ b/primitives/maybe-compressed-blob/src/lib.rs @@ -0,0 +1,166 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Handling of blobs that may be compressed, based on an 8-byte magic identifier +//! at the head. + +use std::borrow::Cow; +use std::io::Read; + +// An arbitrary prefix, that indicates a blob beginning with should be decompressed with +// Zstd compression. +// +// This differs from the WASM magic bytes, so real WASM blobs will not have this prefix. +const ZSTD_PREFIX: [u8; 8] = [82, 188, 83, 118, 70, 219, 142, 5]; + +/// A recommendation for the bomb limit for code blobs. +/// +/// This may be adjusted upwards in the future, but is set much higher than the +/// expected maximum code size. When adjusting upwards, nodes should be updated +/// before performing a runtime upgrade to a blob with larger compressed size. +pub const CODE_BLOB_BOMB_LIMIT: usize = 50 * 1024 * 1024; + +/// A possible bomb was encountered. +#[derive(Debug, Clone, PartialEq)] +pub enum Error { + /// Decoded size was too large, and the code payload may be a bomb. + PossibleBomb, + /// The compressed value had an invalid format. 
+ Invalid, +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match *self { + Error::PossibleBomb => write!(f, "Possible compression bomb encountered"), + Error::Invalid => write!(f, "Blob had invalid format"), + } + } +} + +impl std::error::Error for Error { } + +fn read_from_decoder( + decoder: impl Read, + blob_len: usize, + bomb_limit: usize, +) -> Result, Error> { + let mut decoder = decoder.take((bomb_limit + 1) as u64); + + let mut buf = Vec::with_capacity(blob_len); + decoder.read_to_end(&mut buf).map_err(|_| Error::Invalid)?; + + if buf.len() <= bomb_limit { + Ok(buf) + } else { + Err(Error::PossibleBomb) + } +} + +#[cfg(not(target_os = "unknown"))] +fn decompress_zstd(blob: &[u8], bomb_limit: usize) -> Result, Error> { + let decoder = zstd::Decoder::new(blob).map_err(|_| Error::Invalid)?; + + read_from_decoder(decoder, blob.len(), bomb_limit) +} + +#[cfg(target_os = "unknown")] +fn decompress_zstd(mut blob: &[u8], bomb_limit: usize) -> Result, Error> { + let blob_len = blob.len(); + let decoder = ruzstd::streaming_decoder::StreamingDecoder::new(&mut blob) + .map_err(|_| Error::Invalid)?; + + read_from_decoder(decoder, blob_len, bomb_limit) +} + +/// Decode a blob, if it indicates that it is compressed. Provide a `bomb_limit`, which +/// is the limit of bytes which should be decompressed from the blob. +pub fn decompress(blob: &[u8], bomb_limit: usize) -> Result, Error> { + if blob.starts_with(&ZSTD_PREFIX) { + decompress_zstd(&blob[ZSTD_PREFIX.len()..], bomb_limit).map(Into::into) + } else { + Ok(blob.into()) + } +} + +/// Encode a blob as compressed. If the blob's size is over the bomb limit, +/// this will not compress the blob, as the decoder will not be able to +/// differentiate it from a compression bomb. 
+#[cfg(not(target_os = "unknown"))] +pub fn compress(blob: &[u8], bomb_limit: usize) -> Option> { + use std::io::Write; + + if blob.len() > bomb_limit { + return None; + } + + let mut buf = ZSTD_PREFIX.to_vec(); + + { + let mut v = zstd::Encoder::new(&mut buf, 3).ok()?.auto_finish(); + v.write_all(blob).ok()?; + } + + Some(buf) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::io::Write; + + const BOMB_LIMIT: usize = 10; + + #[test] + fn refuse_to_encode_over_limit() { + let mut v = vec![0; BOMB_LIMIT + 1]; + assert!(compress(&v, BOMB_LIMIT).is_none()); + + let _ = v.pop(); + assert!(compress(&v, BOMB_LIMIT).is_some()); + } + + #[test] + fn compress_and_decompress() { + let v = vec![0; BOMB_LIMIT]; + + let compressed = compress(&v, BOMB_LIMIT).unwrap(); + + assert!(compressed.starts_with(&ZSTD_PREFIX)); + assert_eq!(&decompress(&compressed, BOMB_LIMIT).unwrap()[..], &v[..]) + } + + #[test] + fn decompresses_only_when_magic() { + let v = vec![0; BOMB_LIMIT + 1]; + + assert_eq!(&decompress(&v, BOMB_LIMIT).unwrap()[..], &v[..]); + } + + #[test] + fn possible_bomb_fails() { + let encoded_bigger_than_bomb = vec![0; BOMB_LIMIT + 1]; + let mut buf = ZSTD_PREFIX.to_vec(); + + { + let mut v = zstd::Encoder::new(&mut buf, 3).unwrap().auto_finish(); + v.write_all(&encoded_bigger_than_bomb[..]).unwrap(); + } + + assert_eq!(decompress(&buf[..], BOMB_LIMIT).err(), Some(Error::PossibleBomb)); + } +} diff --git a/primitives/npos-elections/compact/Cargo.toml b/primitives/npos-elections/compact/Cargo.toml index e2fff8e2db010..63432a36efc80 100644 --- a/primitives/npos-elections/compact/Cargo.toml +++ b/primitives/npos-elections/compact/Cargo.toml @@ -19,3 +19,9 @@ syn = { version = "1.0.58", features = ["full", "visit"] } quote = "1.0" proc-macro2 = "1.0.6" proc-macro-crate = "1.0.0" + +[dev-dependencies] +parity-scale-codec = "2.0.1" +sp-arithmetic = { path = "../../arithmetic" } +sp-npos-elections = { path = ".." 
} +trybuild = "1.0.41" diff --git a/primitives/npos-elections/compact/src/lib.rs b/primitives/npos-elections/compact/src/lib.rs index dd6d4de9b0241..e558ae89ca93e 100644 --- a/primitives/npos-elections/compact/src/lib.rs +++ b/primitives/npos-elections/compact/src/lib.rs @@ -52,8 +52,14 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// For example, the following generates a public struct with name `TestSolution` with `u16` voter /// type, `u8` target type and `Perbill` accuracy with maximum of 8 edges per voter. /// -/// ```ignore -/// generate_solution_type!(pub struct TestSolution::(8)) +/// ``` +/// # use sp_npos_elections_compact::generate_solution_type; +/// # use sp_arithmetic::per_things::Perbill; +/// generate_solution_type!(pub struct TestSolution::< +/// VoterIndex = u16, +/// TargetIndex = u8, +/// Accuracy = Perbill, +/// >(8)); /// ``` /// /// The given struct provides function to convert from/to Assignment: @@ -65,11 +71,13 @@ pub(crate) fn syn_err(message: &'static str) -> syn::Error { /// lead to many 0s in the solution. If prefixed with `#[compact]`, then a custom compact encoding /// for numbers will be used, similar to how `parity-scale-codec`'s `Compact` works. 
/// -/// ```ignore +/// ``` +/// # use sp_npos_elections_compact::generate_solution_type; +/// # use sp_arithmetic::per_things::Perbill; /// generate_solution_type!( /// #[compact] -/// pub struct TestSolutionCompact::(8) -/// ) +/// pub struct TestSolutionCompact::(8) +/// ); /// ``` #[proc_macro] pub fn generate_solution_type(item: TokenStream) -> TokenStream { @@ -386,7 +394,7 @@ fn check_compact_attr(input: ParseStream) -> Result { } } -/// #[compact] pub struct CompactName::() +/// #[compact] pub struct CompactName::() impl Parse for SolutionDef { fn parse(input: ParseStream) -> syn::Result { // optional #[compact] @@ -405,9 +413,22 @@ impl Parse for SolutionDef { return Err(syn_err("Must provide 3 generic args.")) } - let mut types: Vec = generics.args.iter().map(|t| + let expected_types = ["VoterIndex", "TargetIndex", "Accuracy"]; + + let mut types: Vec = generics.args.iter().zip(expected_types.iter()).map(|(t, expected)| match t { - syn::GenericArgument::Type(ty) => Ok(ty.clone()), + syn::GenericArgument::Type(ty) => { + // this is now an error + Err(syn::Error::new_spanned(ty, format!("Expected binding: `{} = ...`", expected))) + }, + syn::GenericArgument::Binding(syn::Binding{ident, ty, ..}) => { + // check that we have the right keyword for this position in the argument list + if ident == expected { + Ok(ty.clone()) + } else { + Err(syn::Error::new_spanned(ident, format!("Expected `{}`", expected))) + } + } _ => Err(syn_err("Wrong type of generic provided. 
Must be a `type`.")), } ).collect::>()?; @@ -436,3 +457,12 @@ impl Parse for SolutionDef { fn field_name_for(n: usize) -> Ident { Ident::new(&format!("{}{}", PREFIX, n), Span::call_site()) } + +#[cfg(test)] +mod tests { + #[test] + fn ui_fail() { + let cases = trybuild::TestCases::new(); + cases.compile_fail("tests/ui/fail/*.rs"); + } +} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.rs b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.rs new file mode 100644 index 0000000000000..4bbf4960a9483 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + VoterIndex = u16, + TargetIndex = u8, + Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.stderr b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.stderr new file mode 100644 index 0000000000000..b6bb8f39ede61 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_accuracy.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `Accuracy = ...` + --> $DIR/missing_accuracy.rs:6:2 + | +6 | Perbill, + | ^^^^^^^ diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_target.rs b/primitives/npos-elections/compact/tests/ui/fail/missing_target.rs new file mode 100644 index 0000000000000..7d7584340713c --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_target.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + VoterIndex = u16, + u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_target.stderr b/primitives/npos-elections/compact/tests/ui/fail/missing_target.stderr new file mode 100644 index 0000000000000..d0c92c5bbd8e9 --- /dev/null +++ 
b/primitives/npos-elections/compact/tests/ui/fail/missing_target.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `TargetIndex = ...` + --> $DIR/missing_target.rs:5:2 + | +5 | u8, + | ^^ diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_voter.rs b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.rs new file mode 100644 index 0000000000000..3ad77dc104ad7 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + u16, + TargetIndex = u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/missing_voter.stderr b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.stderr new file mode 100644 index 0000000000000..a825d460c2fa8 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/missing_voter.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `VoterIndex = ...` + --> $DIR/missing_voter.rs:4:2 + | +4 | u16, + | ^^^ diff --git a/primitives/npos-elections/compact/tests/ui/fail/no_annotations.rs b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.rs new file mode 100644 index 0000000000000..aaebb857b3d8d --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + u16, + u8, + Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/no_annotations.stderr b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.stderr new file mode 100644 index 0000000000000..28f1c2091546f --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/no_annotations.stderr @@ -0,0 +1,5 @@ +error: Expected binding: `VoterIndex = ...` + --> $DIR/no_annotations.rs:4:2 + | +4 | u16, + | ^^^ diff --git 
a/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.rs b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.rs new file mode 100644 index 0000000000000..37124256b35e4 --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.rs @@ -0,0 +1,9 @@ +use sp_npos_elections_compact::generate_solution_type; + +generate_solution_type!(pub struct TestSolution::< + TargetIndex = u16, + VoterIndex = u8, + Accuracy = Perbill, +>(8)); + +fn main() {} diff --git a/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.stderr b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.stderr new file mode 100644 index 0000000000000..5759fee7472fa --- /dev/null +++ b/primitives/npos-elections/compact/tests/ui/fail/swap_voter_target.stderr @@ -0,0 +1,5 @@ +error: Expected `VoterIndex` + --> $DIR/swap_voter_target.rs:4:2 + | +4 | TargetIndex = u16, + | ^^^^^^^^^^^ diff --git a/primitives/npos-elections/fuzzer/src/compact.rs b/primitives/npos-elections/fuzzer/src/compact.rs index 91f734bb5b7cb..a49f6a535e5f0 100644 --- a/primitives/npos-elections/fuzzer/src/compact.rs +++ b/primitives/npos-elections/fuzzer/src/compact.rs @@ -4,7 +4,11 @@ use sp_npos_elections::sp_arithmetic::Percent; use sp_runtime::codec::{Encode, Error}; fn main() { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); loop { fuzz!(|fuzzer_data: &[u8]| { let result_decoded: Result = diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 7bd8565a072fd..6304e50ec5868 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -1148,7 +1148,11 @@ mod solution_type { type TestAccuracy = Percent; - generate_solution_type!(pub struct TestSolutionCompact::(16)); + generate_solution_type!(pub 
struct TestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u8, + Accuracy = TestAccuracy, + >(16)); #[allow(dead_code)] mod __private { @@ -1158,7 +1162,7 @@ mod solution_type { use sp_arithmetic::Percent; generate_solution_type!( #[compact] - struct InnerTestSolutionCompact::(12) + struct InnerTestSolutionCompact::(12) ); } @@ -1166,7 +1170,11 @@ mod solution_type { fn solution_struct_works_with_and_without_compact() { // we use u32 size to make sure compact is smaller. let without_compact = { - generate_solution_type!(pub struct InnerTestSolution::(16)); + generate_solution_type!(pub struct InnerTestSolution::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); let compact = InnerTestSolution { votes1: vec![(2, 20), (4, 40)], votes2: vec![ @@ -1180,7 +1188,11 @@ mod solution_type { }; let with_compact = { - generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::(16)); + generate_solution_type!(#[compact] pub struct InnerTestSolutionCompact::< + VoterIndex = u32, + TargetIndex = u32, + Accuracy = Percent, + >(16)); let compact = InnerTestSolutionCompact { votes1: vec![(2, 20), (4, 40)], votes2: vec![ diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index 09ca9a9c46af1..06f7f2c7e3f05 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -40,7 +40,7 @@ impl<'a> PiecewiseLinear<'a> { { let n = n.min(d.clone()); - if self.points.len() == 0 { + if self.points.is_empty() { return N::zero() } diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index dcdd90f4a6397..8594393c7cdea 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -40,7 +40,7 @@ pub struct Digest { impl Default for Digest { fn default() -> Self { - Digest { logs: Vec::new(), } + Self { logs: Vec::new(), } } } @@ -71,7 +71,6 @@ impl Digest { } } - /// Digest item that is able to encode/decode 'system' 
digest items and /// provide opaque access to other items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] @@ -209,14 +208,14 @@ pub enum OpaqueDigestItemId<'a> { impl DigestItem { /// Returns a 'referencing view' for this digest item. - pub fn dref<'a>(&'a self) -> DigestItemRef<'a, Hash> { + pub fn dref(&self) -> DigestItemRef { match *self { - DigestItem::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), - DigestItem::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), - DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), - DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), - DigestItem::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), - DigestItem::Other(ref v) => DigestItemRef::Other(v), + Self::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), + Self::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), + Self::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), + Self::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), + Self::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), + Self::Other(ref v) => DigestItemRef::Other(v), } } @@ -298,25 +297,25 @@ impl Decode for DigestItem { fn decode(input: &mut I) -> Result { let item_type: DigestItemType = Decode::decode(input)?; match item_type { - DigestItemType::ChangesTrieRoot => Ok(DigestItem::ChangesTrieRoot( + DigestItemType::ChangesTrieRoot => Ok(Self::ChangesTrieRoot( Decode::decode(input)?, )), DigestItemType::PreRuntime => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::PreRuntime(vals.0, vals.1)) + Ok(Self::PreRuntime(vals.0, vals.1)) }, DigestItemType::Consensus => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::Consensus(vals.0, vals.1)) + Ok(Self::Consensus(vals.0, vals.1)) } DigestItemType::Seal => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::Seal(vals.0, vals.1)) + Ok(Self::Seal(vals.0, vals.1)) }, - 
DigestItemType::ChangesTrieSignal => Ok(DigestItem::ChangesTrieSignal( + DigestItemType::ChangesTrieSignal => Ok(Self::ChangesTrieSignal( Decode::decode(input)?, )), - DigestItemType::Other => Ok(DigestItem::Other( + DigestItemType::Other => Ok(Self::Other( Decode::decode(input)?, )), } @@ -327,7 +326,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `ChangesTrieRoot`. pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { match *self { - DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), + Self::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), _ => None, } } @@ -335,7 +334,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `PreRuntime` pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { - DigestItemRef::PreRuntime(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), + Self::PreRuntime(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), _ => None, } } @@ -343,7 +342,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `Consensus` pub fn as_consensus(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { - DigestItemRef::Consensus(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), + Self::Consensus(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), _ => None, } } @@ -351,7 +350,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `Seal` pub fn as_seal(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { - DigestItemRef::Seal(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), + Self::Seal(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), _ => None, } } @@ -359,7 +358,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `ChangesTrieSignal`. 
pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { match *self { - DigestItemRef::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), + Self::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), _ => None, } } @@ -367,7 +366,7 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// Cast this digest item into `PreRuntime` pub fn as_other(&self) -> Option<&'a [u8]> { match *self { - DigestItemRef::Other(ref data) => Some(data), + Self::Other(ref data) => Some(data), _ => None, } } @@ -376,11 +375,11 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { /// return the opaque data it contains. pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { match (id, self) { - (OpaqueDigestItemId::Consensus(w), &DigestItemRef::Consensus(v, s)) | - (OpaqueDigestItemId::Seal(w), &DigestItemRef::Seal(v, s)) | - (OpaqueDigestItemId::PreRuntime(w), &DigestItemRef::PreRuntime(v, s)) + (OpaqueDigestItemId::Consensus(w), &Self::Consensus(v, s)) | + (OpaqueDigestItemId::Seal(w), &Self::Seal(v, s)) | + (OpaqueDigestItemId::PreRuntime(w), &Self::PreRuntime(v, s)) if v == w => Some(&s[..]), - (OpaqueDigestItemId::Other, &DigestItemRef::Other(s)) => Some(&s[..]), + (OpaqueDigestItemId::Other, &Self::Other(s)) => Some(&s[..]), _ => None, } } @@ -432,27 +431,27 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { let mut v = Vec::new(); match *self { - DigestItemRef::ChangesTrieRoot(changes_trie_root) => { + Self::ChangesTrieRoot(changes_trie_root) => { DigestItemType::ChangesTrieRoot.encode_to(&mut v); changes_trie_root.encode_to(&mut v); }, - DigestItemRef::Consensus(val, data) => { + Self::Consensus(val, data) => { DigestItemType::Consensus.encode_to(&mut v); (val, data).encode_to(&mut v); }, - DigestItemRef::Seal(val, sig) => { + Self::Seal(val, sig) => { DigestItemType::Seal.encode_to(&mut v); (val, sig).encode_to(&mut v); }, - DigestItemRef::PreRuntime(val, data) => { + Self::PreRuntime(val, data) => { 
DigestItemType::PreRuntime.encode_to(&mut v); (val, data).encode_to(&mut v); }, - DigestItemRef::ChangesTrieSignal(changes_trie_signal) => { + Self::ChangesTrieSignal(changes_trie_signal) => { DigestItemType::ChangesTrieSignal.encode_to(&mut v); changes_trie_signal.encode_to(&mut v); }, - DigestItemRef::Other(val) => { + Self::Other(val) => { DigestItemType::Other.encode_to(&mut v); val.encode_to(&mut v); }, @@ -466,7 +465,7 @@ impl ChangesTrieSignal { /// Try to cast this signal to NewConfiguration. pub fn as_new_configuration(&self) -> Option<&Option> { match self { - ChangesTrieSignal::NewConfiguration(config) => Some(config), + Self::NewConfiguration(config) => Some(config), } } } @@ -488,7 +487,7 @@ mod tests { }; assert_eq!( - ::serde_json::to_string(&digest).unwrap(), + serde_json::to_string(&digest).unwrap(), r#"{"logs":["0x0204000000","0x000c010203","0x05746573740c010203"]}"# ); } diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index 5bee170048b5f..fbda688cc407a 100644 --- a/primitives/runtime/src/generic/era.rs +++ b/primitives/runtime/src/generic/era.rs @@ -72,36 +72,33 @@ impl Era { let quantize_factor = (period >> 12).max(1); let quantized_phase = phase / quantize_factor * quantize_factor; - Era::Mortal(period, quantized_phase) + Self::Mortal(period, quantized_phase) } /// Create an "immortal" transaction. pub fn immortal() -> Self { - Era::Immortal + Self::Immortal } /// `true` if this is an immortal transaction. pub fn is_immortal(&self) -> bool { - match self { - Era::Immortal => true, - _ => false, - } + matches!(self, Self::Immortal) } /// Get the block number of the start of the era whose properties this object /// describes that `current` belongs to. 
pub fn birth(self, current: u64) -> u64 { match self { - Era::Immortal => 0, - Era::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, + Self::Immortal => 0, + Self::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, } } /// Get the block number of the first block at which the era has ended. pub fn death(self, current: u64) -> u64 { match self { - Era::Immortal => u64::max_value(), - Era::Mortal(period, _) => self.birth(current) + period, + Self::Immortal => u64::max_value(), + Self::Mortal(period, _) => self.birth(current) + period, } } } @@ -109,8 +106,8 @@ impl Era { impl Encode for Era { fn encode_to(&self, output: &mut T) { match self { - Era::Immortal => output.push_byte(0), - Era::Mortal(period, phase) => { + Self::Immortal => output.push_byte(0), + Self::Mortal(period, phase) => { let quantize_factor = (*period as u64 >> 12).max(1); let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; encoded.encode_to(output); @@ -125,14 +122,14 @@ impl Decode for Era { fn decode(input: &mut I) -> Result { let first = input.read_byte()?; if first == 0 { - Ok(Era::Immortal) + Ok(Self::Immortal) } else { let encoded = first as u64 + ((input.read_byte()? 
as u64) << 8); let period = 2 << (encoded % (1 << 4)); let quantize_factor = (period >> 12).max(1); let phase = (encoded >> 4) * quantize_factor; if period >= 4 && phase < period { - Ok(Era::Mortal(period, phase)) + Ok(Self::Mortal(period, phase)) } else { Err("Invalid period and phase".into()) } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 62f9908fbe58d..69c5f50796886 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -91,7 +91,7 @@ impl Decode for Header where Hash::Output: Decode, { fn decode(input: &mut I) -> Result { - Ok(Header { + Ok(Self { parent_hash: Decode::decode(input)?, number: <::Type>::decode(input)?.into(), state_root: Decode::decode(input)?, @@ -160,7 +160,7 @@ impl traits::Header for Header where parent_hash: Self::Hash, digest: Digest, ) -> Self { - Header { + Self { number, extrinsics_root, state_root, diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index 5c87d2715509d..d6164d0b51cc2 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -70,7 +70,7 @@ impl signature: Signature, extra: Extra ) -> Self { - UncheckedExtrinsic { + Self { signature: Some((signed, signature, extra)), function, } @@ -78,7 +78,7 @@ impl /// New instance of an unsigned extrinsic aka "inherent". 
pub fn new_unsigned(function: Call) -> Self { - UncheckedExtrinsic { + Self { signature: None, function, } @@ -102,9 +102,9 @@ impl Extrinsic fn new(function: Call, signed_data: Option) -> Option { Some(if let Some((address, signature, extra)) = signed_data { - UncheckedExtrinsic::new_signed(function, address, signature, extra) + Self::new_signed(function, address, signature, extra) } else { - UncheckedExtrinsic::new_unsigned(function) + Self::new_unsigned(function) }) } } @@ -238,7 +238,7 @@ where return Err("Invalid transaction version".into()); } - Ok(UncheckedExtrinsic { + Ok(Self { signature: if is_signed { Some(Decode::decode(input)?) } else { None }, function: Decode::decode(input)?, }) @@ -327,7 +327,7 @@ where Extra: SignedExtension, { fn from(extrinsic: UncheckedExtrinsic) -> Self { - OpaqueExtrinsic::from_bytes(extrinsic.encode().as_slice()) + Self::from_bytes(extrinsic.encode().as_slice()) .expect( "both OpaqueExtrinsic and UncheckedExtrinsic have encoding that is compatible with \ raw Vec encoding; qed" diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 4fb7d9c7737fb..51b89d484e6ce 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -158,14 +158,6 @@ impl From for Justifications { use traits::{Verify, Lazy}; -/// A module identifier. These are per module and should be stored in a registry somewhere. 
-#[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] -pub struct ModuleId(pub [u8; 8]); - -impl TypeId for ModuleId { - const TYPE_ID: [u8; 4] = *b"modl"; -} - #[cfg(feature = "std")] pub use serde::{Serialize, Deserialize, de::DeserializeOwned}; use crate::traits::IdentifyAccount; @@ -245,7 +237,7 @@ pub enum MultiSignature { impl From for MultiSignature { fn from(x: ed25519::Signature) -> Self { - MultiSignature::Ed25519(x) + Self::Ed25519(x) } } @@ -258,7 +250,7 @@ impl TryFrom for ed25519::Signature { impl From for MultiSignature { fn from(x: sr25519::Signature) -> Self { - MultiSignature::Sr25519(x) + Self::Sr25519(x) } } @@ -271,7 +263,7 @@ impl TryFrom for sr25519::Signature { impl From for MultiSignature { fn from(x: ecdsa::Signature) -> Self { - MultiSignature::Ecdsa(x) + Self::Ecdsa(x) } } @@ -284,7 +276,7 @@ impl TryFrom for ecdsa::Signature { impl Default for MultiSignature { fn default() -> Self { - MultiSignature::Ed25519(Default::default()) + Self::Ed25519(Default::default()) } } @@ -302,7 +294,7 @@ pub enum MultiSigner { impl Default for MultiSigner { fn default() -> Self { - MultiSigner::Ed25519(Default::default()) + Self::Ed25519(Default::default()) } } @@ -317,9 +309,9 @@ impl> crypto::UncheckedFrom for MultiSigner { impl AsRef<[u8]> for MultiSigner { fn as_ref(&self) -> &[u8] { match *self { - MultiSigner::Ed25519(ref who) => who.as_ref(), - MultiSigner::Sr25519(ref who) => who.as_ref(), - MultiSigner::Ecdsa(ref who) => who.as_ref(), + Self::Ed25519(ref who) => who.as_ref(), + Self::Sr25519(ref who) => who.as_ref(), + Self::Ecdsa(ref who) => who.as_ref(), } } } @@ -328,16 +320,16 @@ impl traits::IdentifyAccount for MultiSigner { type AccountId = AccountId32; fn into_account(self) -> AccountId32 { match self { - MultiSigner::Ed25519(who) => <[u8; 32]>::from(who).into(), - MultiSigner::Sr25519(who) => <[u8; 32]>::from(who).into(), - MultiSigner::Ecdsa(who) => sp_io::hashing::blake2_256(&who.as_ref()[..]).into(), + Self::Ed25519(who) => <[u8; 
32]>::from(who).into(), + Self::Sr25519(who) => <[u8; 32]>::from(who).into(), + Self::Ecdsa(who) => sp_io::hashing::blake2_256(who.as_ref()).into(), } } } impl From for MultiSigner { fn from(x: ed25519::Public) -> Self { - MultiSigner::Ed25519(x) + Self::Ed25519(x) } } @@ -350,7 +342,7 @@ impl TryFrom for ed25519::Public { impl From for MultiSigner { fn from(x: sr25519::Public) -> Self { - MultiSigner::Sr25519(x) + Self::Sr25519(x) } } @@ -363,7 +355,7 @@ impl TryFrom for sr25519::Public { impl From for MultiSigner { fn from(x: ecdsa::Public) -> Self { - MultiSigner::Ecdsa(x) + Self::Ecdsa(x) } } @@ -378,9 +370,9 @@ impl TryFrom for ecdsa::Public { impl std::fmt::Display for MultiSigner { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { match *self { - MultiSigner::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), - MultiSigner::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), - MultiSigner::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), + Self::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), + Self::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), + Self::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), } } } @@ -389,9 +381,9 @@ impl Verify for MultiSignature { type Signer = MultiSigner; fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { match (self, signer) { - (MultiSignature::Ed25519(ref sig), who) => sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), - (MultiSignature::Sr25519(ref sig), who) => sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), - (MultiSignature::Ecdsa(ref sig), who) => { + (Self::Ed25519(ref sig), who) => sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), + (Self::Sr25519(ref sig), who) => sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), + (Self::Ecdsa(ref sig), who) => { let m = sp_io::hashing::blake2_256(msg.get()); match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { Ok(pubkey) => @@ -424,13 +416,13 @@ impl Verify for 
AnySignature { impl From for AnySignature { fn from(s: sr25519::Signature) -> Self { - AnySignature(s.into()) + Self(s.into()) } } impl From for AnySignature { fn from(s: ed25519::Signature) -> Self { - AnySignature(s.into()) + Self(s.into()) } } @@ -450,7 +442,7 @@ pub type DispatchResult = sp_std::result::Result<(), DispatchError>; pub type DispatchResultWithInfo = sp_std::result::Result>; /// Reason why a dispatch call failed. -#[derive(Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] +#[derive(Eq, Clone, Copy, Encode, Decode, Debug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum DispatchError { /// Some error occurred. @@ -474,6 +466,8 @@ pub enum DispatchError { ConsumerRemaining, /// There are no providers so the account cannot be created. NoProviders, + /// An error to do with tokens. + Token(TokenError), } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information about @@ -532,9 +526,52 @@ impl From for DispatchError { } } +/// Description of what went wrong when trying to complete an operation on a token. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, Debug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +pub enum TokenError { + /// Funds are unavailable. + NoFunds, + /// Account that must exist would die. + WouldDie, + /// Account cannot exist with the funds that would be given. + BelowMinimum, + /// Account cannot be created. + CannotCreate, + /// The asset in question is unknown. + UnknownAsset, + /// Funds exist but are frozen. + Frozen, + /// An underflow would occur. + Underflow, + /// An overflow would occur. 
+ Overflow, +} + +impl From for &'static str { + fn from(e: TokenError) -> &'static str { + match e { + TokenError::NoFunds => "Funds are unavailable", + TokenError::WouldDie => "Account that must exist would die", + TokenError::BelowMinimum => "Account cannot exist with the funds that would be given", + TokenError::CannotCreate => "Account cannot be created", + TokenError::UnknownAsset => "The asset in question is unknown", + TokenError::Frozen => "Funds exist but are frozen", + TokenError::Underflow => "An underflow would occur", + TokenError::Overflow => "An overflow would occur", + } + } +} + +impl From for DispatchError { + fn from(e: TokenError) -> DispatchError { + Self::Token(e) + } +} + impl From<&'static str> for DispatchError { fn from(err: &'static str) -> DispatchError { - DispatchError::Other(err) + Self::Other(err) } } @@ -547,6 +584,7 @@ impl From for &'static str { DispatchError::Module { message, .. } => message.unwrap_or("Unknown module error"), DispatchError::ConsumerRemaining => "Consumer remaining", DispatchError::NoProviders => "No providers", + DispatchError::Token(e) => e.into(), } } } @@ -575,6 +613,10 @@ impl traits::Printable for DispatchError { } Self::ConsumerRemaining => "Consumer remaining".print(), Self::NoProviders => "No providers".print(), + Self::Token(e) => { + "Token error: ".print(); + <&'static str>::from(*e).print(); + } } } } @@ -599,7 +641,9 @@ impl PartialEq for DispatchError { (ConsumerRemaining, ConsumerRemaining) | (NoProviders, NoProviders) => true, + (Token(l), Token(r)) => l == r, (Other(l), Other(r)) => l == r, + ( Module { index: index_l, error: error_l, .. }, Module { index: index_r, error: error_r, .. }, @@ -712,7 +756,7 @@ pub struct OpaqueExtrinsic(Vec); impl OpaqueExtrinsic { /// Convert an encoded extrinsic to an `OpaqueExtrinsic`. 
pub fn from_bytes(mut bytes: &[u8]) -> Result { - OpaqueExtrinsic::decode(&mut bytes) + Self::decode(&mut bytes) } } @@ -735,7 +779,6 @@ impl sp_std::fmt::Debug for OpaqueExtrinsic { } } - #[cfg(feature = "std")] impl ::serde::Serialize for OpaqueExtrinsic { fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { @@ -762,7 +805,6 @@ pub fn print(print: impl traits::Printable) { print.print(); } - /// Batching session. /// /// To be used in runtime only. Outside of runtime, just construct @@ -895,7 +937,6 @@ mod tests { assert!(multi_sig.verify(msg, &multi_signer.into_account())); } - #[test] #[should_panic(expected = "Signature verification has not been called")] fn batching_still_finishes_when_not_called_directly() { diff --git a/primitives/runtime/src/multiaddress.rs b/primitives/runtime/src/multiaddress.rs index d09cd7acaf4db..e1a4c81a5f9ae 100644 --- a/primitives/runtime/src/multiaddress.rs +++ b/primitives/runtime/src/multiaddress.rs @@ -45,9 +45,9 @@ where fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { use sp_core::hexdisplay::HexDisplay; match self { - MultiAddress::Raw(inner) => write!(f, "MultiAddress::Raw({})", HexDisplay::from(inner)), - MultiAddress::Address32(inner) => write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)), - MultiAddress::Address20(inner) => write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)), + Self::Raw(inner) => write!(f, "MultiAddress::Raw({})", HexDisplay::from(inner)), + Self::Address32(inner) => write!(f, "MultiAddress::Address32({})", HexDisplay::from(inner)), + Self::Address20(inner) => write!(f, "MultiAddress::Address20({})", HexDisplay::from(inner)), _ => write!(f, "{:?}", self), } } @@ -55,12 +55,12 @@ where impl From for MultiAddress { fn from(a: AccountId) -> Self { - MultiAddress::Id(a) + Self::Id(a) } } impl Default for MultiAddress { fn default() -> Self { - MultiAddress::Id(Default::default()) + Self::Id(Default::default()) } } diff --git 
a/primitives/runtime/src/offchain/storage_lock.rs b/primitives/runtime/src/offchain/storage_lock.rs index 1529de4ab591a..4bb9799678430 100644 --- a/primitives/runtime/src/offchain/storage_lock.rs +++ b/primitives/runtime/src/offchain/storage_lock.rs @@ -158,7 +158,7 @@ impl Clone for BlockAndTimeDeadline { fn clone(&self) -> Self { Self { block_number: self.block_number.clone(), - timestamp: self.timestamp.clone(), + timestamp: self.timestamp, } } } @@ -202,7 +202,7 @@ impl Default for BlockAndTime { impl Clone for BlockAndTime { fn clone(&self) -> Self { Self { - expiration_block_number_offset: self.expiration_block_number_offset.clone(), + expiration_block_number_offset: self.expiration_block_number_offset, expiration_duration: self.expiration_duration, _phantom: core::marker::PhantomData::, } @@ -386,7 +386,7 @@ impl<'a> StorageLock<'a, Time> { Self { value_ref: StorageValueRef::<'a>::persistent(key), lockable: Time { - expiration_duration: expiration_duration, + expiration_duration, }, } } diff --git a/primitives/runtime/src/runtime_logger.rs b/primitives/runtime/src/runtime_logger.rs index e27dc828cdbc6..f74704390174d 100644 --- a/primitives/runtime/src/runtime_logger.rs +++ b/primitives/runtime/src/runtime_logger.rs @@ -40,22 +40,15 @@ impl RuntimeLogger { static LOGGER: RuntimeLogger = RuntimeLogger; let _ = log::set_logger(&LOGGER); - // Set max level to `TRACE` to ensure we propagate - // all log entries to the native side that will do the - // final filtering on what should be printed. - // - // If we don't set any level, logging is disabled - // completly. - log::set_max_level(log::LevelFilter::Trace); + // Use the same max log level as used by the host. + log::set_max_level(sp_io::logging::max_level().into()); } } impl log::Log for RuntimeLogger { - fn enabled(&self, _metadata: &log::Metadata) -> bool { - // to avoid calling to host twice, we pass everything - // and let the host decide what to print. 
- // If someone is initializing the logger they should - // know what they are doing. + fn enabled(&self, _: &log::Metadata) -> bool { + // The final filtering is done by the host. This is not perfect, as we would still call into + // the host for log lines that will be thrown away. true } @@ -81,11 +74,13 @@ mod tests { TestClientBuilder, runtime::TestAPI, }; use sp_api::{ProvideRuntimeApi, BlockId}; + use std::{env, str::FromStr}; #[test] - fn ensure_runtime_logger_works() { - if std::env::var("RUN_TEST").is_ok() { + fn ensure_runtime_logger_respects_host_max_log_level() { + if env::var("RUN_TEST").is_ok() { sp_tracing::try_init_simple(); + log::set_max_level(log::LevelFilter::from_str(&env::var("RUST_LOG").unwrap()).unwrap()); let client = TestClientBuilder::new() .set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); @@ -93,16 +88,18 @@ mod tests { let block_id = BlockId::Number(0); runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); } else { - let executable = std::env::current_exe().unwrap(); - let output = std::process::Command::new(executable) - .env("RUN_TEST", "1") - .env("RUST_LOG", "trace") - .args(&["--nocapture", "ensure_runtime_logger_works"]) - .output() - .unwrap(); + for (level, should_print) in &[("trace", true), ("info", false)] { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_TEST", "1") + .env("RUST_LOG", level) + .args(&["--nocapture", "ensure_runtime_logger_respects_host_max_log_level"]) + .output() + .unwrap(); - let output = dbg!(String::from_utf8(output.stderr).unwrap()); - assert!(output.contains("Hey I'm runtime")); + let output = String::from_utf8(output.stderr).unwrap(); + assert!(output.contains("Hey I'm runtime") == *should_print); + } } } } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index b6d2641f01083..f473dc7028f4e 100644 --- a/primitives/runtime/src/testing.rs +++ 
b/primitives/runtime/src/testing.rs @@ -303,6 +303,14 @@ impl traits::Extrinsic for TestXt } } +impl traits::ExtrinsicMetadata for TestXt where + Call: Codec + Sync + Send, + Extra: SignedExtension, +{ + type SignedExtensions = Extra; + const VERSION: u8 = 0u8; +} + impl Applyable for TestXt where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, Extra: SignedExtension, diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 128c9a6eed0a0..41820d8cb4a1c 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -19,7 +19,6 @@ use sp_std::prelude::*; use sp_std::{self, marker::PhantomData, convert::{TryFrom, TryInto}, fmt::Debug}; -use sp_io; #[cfg(feature = "std")] use std::fmt::Display; #[cfg(feature = "std")] @@ -111,7 +110,7 @@ impl Verify for sp_core::ecdsa::Signature { self.as_ref(), &sp_io::hashing::blake2_256(msg.get()), ) { - Ok(pubkey) => &signer.as_ref()[..] == &pubkey[..], + Ok(pubkey) => signer.as_ref() == &pubkey[..], _ => false, } } @@ -1218,19 +1217,24 @@ macro_rules! impl_opaque_keys { )* } ) => { - $( #[ $attr ] )* - #[derive( - Default, Clone, PartialEq, Eq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - #[cfg_attr(feature = "std", derive($crate::serde::Serialize, $crate::serde::Deserialize))] - pub struct $name { - $( - $( #[ $inner_attr ] )* - pub $field: <$type as $crate::BoundToRuntimeAppPublic>::Public, - )* + $crate::paste::paste! 
{ + #[cfg(feature = "std")] + use $crate::serde as [< __opaque_keys_serde_import__ $name >]; + $( #[ $attr ] )* + #[derive( + Default, Clone, PartialEq, Eq, + $crate::codec::Encode, + $crate::codec::Decode, + $crate::RuntimeDebug, + )] + #[cfg_attr(feature = "std", derive($crate::serde::Serialize, $crate::serde::Deserialize))] + #[cfg_attr(feature = "std", serde(crate = "__opaque_keys_serde_import__" $name))] + pub struct $name { + $( + $( #[ $inner_attr ] )* + pub $field: <$type as $crate::BoundToRuntimeAppPublic>::Public, + )* + } } impl $name { diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index b0c3e4dd031cc..0ee4b48612041 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -81,18 +81,12 @@ pub enum InvalidTransaction { impl InvalidTransaction { /// Returns if the reason for the invalidity was block resource exhaustion. pub fn exhausted_resources(&self) -> bool { - match self { - Self::ExhaustsResources => true, - _ => false, - } + matches!(self, Self::ExhaustsResources) } /// Returns if the reason for the invalidity was a mandatory call failing. pub fn was_mandatory(&self) -> bool { - match self { - Self::BadMandatory => true, - _ => false, - } + matches!(self, Self::BadMandatory) } } @@ -209,15 +203,15 @@ impl std::fmt::Display for TransactionValidityError { /// Information on a transaction's validity and, if valid, on how it relates to other transactions. 
pub type TransactionValidity = Result; -impl Into for InvalidTransaction { - fn into(self) -> TransactionValidity { - Err(self.into()) +impl From for TransactionValidity { + fn from(invalid_transaction: InvalidTransaction) -> Self { + Err(TransactionValidityError::Invalid(invalid_transaction)) } } -impl Into for UnknownTransaction { - fn into(self) -> TransactionValidity { - Err(self.into()) +impl From for TransactionValidity { + fn from(unknown_transaction: UnknownTransaction) -> Self { + Err(TransactionValidityError::Unknown(unknown_transaction)) } } @@ -285,7 +279,7 @@ pub struct ValidTransaction { impl Default for ValidTransaction { fn default() -> Self { - ValidTransaction { + Self { priority: 0, requires: vec![], provides: vec![], @@ -311,7 +305,7 @@ impl ValidTransaction { /// `provides` and `requires` tags, it will sum the priorities, take the minimum longevity and /// the logic *And* of the propagate flags. pub fn combine_with(mut self, mut other: ValidTransaction) -> Self { - ValidTransaction { + Self { priority: self.priority.saturating_add(other.priority), requires: { self.requires.append(&mut other.requires); self.requires }, provides: { self.provides.append(&mut other.provides); self.provides }, diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index eb1c566c6dde1..1a8892f8dd141 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -245,6 +245,11 @@ pub trait Backend: sp_std::fmt::Debug { /// Update the whitelist for tracking db reads/writes fn set_whitelist(&self, _: Vec) {} + + /// Estimate proof size + fn proof_size(&self) -> Option { + unimplemented!() + } } impl<'a, T: Backend, H: Hasher> Backend for &'a T { diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 65b7b638a9a2e..424a3c6c421a2 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -712,6 +712,10 @@ where fn 
set_whitelist(&mut self, new: Vec) { self.backend.set_whitelist(new) } + + fn proof_size(&self) -> Option { + self.backend.proof_size() + } } /// Implement `Encode` by forwarding the stored raw vec. diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 6b87aa12eb1af..28672659fa10c 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -17,9 +17,9 @@ //! Proving state machine backend. -use std::{sync::Arc, collections::HashMap}; +use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; use parking_lot::RwLock; -use codec::{Decode, Codec}; +use codec::{Decode, Codec, Encode}; use log::debug; use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; use sp_trie::{ @@ -109,9 +109,69 @@ impl<'a, S, H> ProvingBackendRecorder<'a, S, H> } } -/// Global proof recorder, act as a layer over a hash db for recording queried -/// data. -pub type ProofRecorder = Arc::Out, Option>>>; +#[derive(Default)] +struct ProofRecorderInner { + /// All the records that we have stored so far. + records: HashMap>, + /// The encoded size of all recorded values. + encoded_size: usize, +} + +/// Global proof recorder, act as a layer over a hash db for recording queried data. +#[derive(Clone, Default)] +pub struct ProofRecorder { + inner: Arc>>, +} + +impl ProofRecorder { + /// Record the given `key` => `val` combination. + pub fn record(&self, key: Hash, val: Option) { + let mut inner = self.inner.write(); + let encoded_size = if let Entry::Vacant(entry) = inner.records.entry(key) { + let encoded_size = val.as_ref().map(Encode::encoded_size).unwrap_or(0); + + entry.insert(val); + encoded_size + } else { + 0 + }; + + inner.encoded_size += encoded_size; + } + + /// Returns the value at the given `key`. + pub fn get(&self, key: &Hash) -> Option> { + self.inner.read().records.get(key).cloned() + } + + /// Returns the estimated encoded size of the proof. 
+ /// + /// The estimation is maybe bigger (by in maximum 4 bytes), but never smaller than the actual + /// encoded proof. + pub fn estimate_encoded_size(&self) -> usize { + let inner = self.inner.read(); + inner.encoded_size + + codec::Compact(inner.records.len() as u32).encoded_size() + } + + /// Convert into a [`StorageProof`]. + pub fn to_storage_proof(&self) -> StorageProof { + let trie_nodes = self.inner.read() + .records + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + + StorageProof::new(trie_nodes) + } + + /// Reset the internal state. + pub fn reset(&self) { + let mut inner = self.inner.write(); + inner.records.clear(); + inner.encoded_size = 0; + } +} /// Patricia trie-based backend which also tracks all touched storage trie values. /// These can be sent to remote node and used as a proof of execution. @@ -122,7 +182,7 @@ pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ( /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { backend: &'a S, - proof_recorder: ProofRecorder, + proof_recorder: ProofRecorder, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> @@ -137,7 +197,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Create new proving backend with the given recorder. pub fn new_with_recorder( backend: &'a TrieBackend, - proof_recorder: ProofRecorder, + proof_recorder: ProofRecorder, ) -> Self { let essence = backend.essence(); let root = essence.root().clone(); @@ -150,12 +210,7 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> /// Extracting the gathered unordered proof. 
pub fn extract_proof(&self) -> StorageProof { - let trie_nodes = self.0.essence().backend_storage().proof_recorder - .read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - StorageProof::new(trie_nodes) + self.0.essence().backend_storage().proof_recorder.to_storage_proof() } } @@ -165,11 +220,12 @@ impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage type Overlay = S::Overlay; fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.read().get(key) { - return Ok(v.clone()); + if let Some(v) = self.proof_recorder.get(key) { + return Ok(v); } - let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.write().insert(key.clone(), backend_value.clone()); + + let backend_value = self.backend.get(key, prefix)?; + self.proof_recorder.record(key.clone(), backend_value.clone()); Ok(backend_value) } } @@ -343,8 +399,8 @@ mod tests { assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + let (trie_root, mut trie_mdb) = trie_backend.storage_root(std::iter::empty()); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(std::iter::empty()); assert_eq!(trie_root, proving_root); assert_eq!(trie_mdb.drain(), proving_mdb.drain()); } @@ -405,7 +461,7 @@ mod tests { )); let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; + let trie_root = trie.storage_root(std::iter::empty()).0; assert_eq!(in_memory_root, trie_root); (0..64).for_each(|i| assert_eq!( trie.storage(&[i]).unwrap().unwrap(), @@ -440,4 +496,35 @@ mod tests { vec![64] ); } + + #[test] + fn storage_proof_encoded_size_estimation_works() { + let trie_backend = test_trie(); 
+ let backend = test_proving(&trie_backend); + + let check_estimation = |backend: &ProvingBackend<'_, PrefixedMemoryDB, BlakeTwo256>| { + let storage_proof = backend.extract_proof(); + let estimation = backend.0.essence() + .backend_storage() + .proof_recorder + .estimate_encoded_size(); + + assert_eq!(storage_proof.encoded_size(), estimation); + }; + + assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + check_estimation(&backend); + + assert_eq!(backend.storage(b"value1").unwrap(), Some(vec![42])); + check_estimation(&backend); + + assert_eq!(backend.storage(b"value2").unwrap(), Some(vec![24])); + check_estimation(&backend); + + assert!(backend.storage(b"doesnotexist").unwrap().is_none()); + check_estimation(&backend); + + assert!(backend.storage(b"doesnotexist2").unwrap().is_none()); + check_estimation(&backend); + } } diff --git a/shell.nix b/shell.nix index 85bdce797cb83..a6a8d4187cd42 100644 --- a/shell.nix +++ b/shell.nix @@ -5,14 +5,13 @@ let rev = "57c8084c7ef41366993909c20491e359bbb90f54"; }); nixpkgs = import { overlays = [ mozillaOverlay ]; }; - rust-nightly = with nixpkgs; ((rustChannelOf { date = "2020-10-23"; channel = "nightly"; }).rust.override { + rust-nightly = with nixpkgs; ((rustChannelOf { date = "2021-03-01"; channel = "nightly"; }).rust.override { targets = [ "wasm32-unknown-unknown" ]; }); in with nixpkgs; pkgs.mkShell { buildInputs = [ clang - cmake pkg-config rust-nightly ] ++ stdenv.lib.optionals stdenv.isDarwin [ diff --git a/ss58-registry.json b/ss58-registry.json index 62ed68b7927c5..624d0256a81fe 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -477,6 +477,24 @@ "decimals": [12], "standardAccount": "*25519", "website": "https://crust.network" + }, + { + "prefix": 69, + "network": "sora", + "displayName": "SORA Network", + "symbols": ["XOR"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://sora.org" + }, + { + "prefix": 252, + "network": "social-network", + "displayName": "Social 
Network", + "symbols": ["NET"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://social.network" } ] } diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index df1cca2101ad7..925a69e41bb48 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -33,3 +33,4 @@ sp-keystore = { version = "0.9.0", path = "../../primitives/keystore" } sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } +async-trait = "0.1.42" diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index aa4856f6baf66..edba96d760fc2 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -43,18 +43,20 @@ pub trait ClientExt: Sized { } /// Extension trait for a test client around block importing. +#[async_trait::async_trait] pub trait ClientBlockImportExt: Sized { /// Import block to the chain. No finality. - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and make it our best block if possible. - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and finalize it. - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import_as_final(&mut self, origin: BlockOrigin, block: Block) + -> Result<(), ConsensusError>; /// Import block with justification(s), finalizes block. 
- fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, @@ -83,38 +85,54 @@ impl ClientExt for Client } /// This implementation is required, because of the weird api requirements around `BlockImport`. +#[async_trait::async_trait] impl ClientBlockImportExt for std::sync::Arc - where for<'r> &'r T: BlockImport + where + for<'r> &'r T: BlockImport, + Transaction: Send + 'static, + T: Send + Sync, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - 
BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, @@ -127,43 +145,60 @@ impl ClientBlockImportExt for std::sync::A import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } } +#[async_trait::async_trait] impl ClientBlockImportExt for Client where Self: BlockImport, + RA: Send, + B: Send + Sync, + E: Send, + >::Transaction: Send, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_best( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import_as_final( + &mut self, + origin: BlockOrigin, + block: Block, + ) -> 
Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); import.finalized = true; import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + BlockImport::import_block(self, import, HashMap::new()).await.map(|_| ()) } - fn import_justified( + async fn import_justified( &mut self, origin: BlockOrigin, block: Block, @@ -176,6 +211,6 @@ impl ClientBlockImportExt for Client(backend: Arc) where @@ -57,7 +58,7 @@ pub fn test_leaves_for_backend(backend: Arc) where // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a1.hash()], @@ -69,7 +70,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); #[allow(deprecated)] assert_eq!( @@ -83,7 +84,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), @@ -96,7 +97,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a4.hash()], @@ -109,7 +110,7 @@ pub fn test_leaves_for_backend(backend: Arc) where false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + 
block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash()], @@ -130,7 +131,7 @@ pub fn test_leaves_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b2.hash()], @@ -143,7 +144,7 @@ pub fn test_leaves_for_backend(backend: Arc) where false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b3.hash()], @@ -155,7 +156,7 @@ pub fn test_leaves_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b4.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()], @@ -175,7 +176,7 @@ pub fn test_leaves_for_backend(backend: Arc) where nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash()], @@ -195,7 +196,7 @@ pub fn test_leaves_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); assert_eq!( blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()], @@ -220,7 +221,7 @@ pub fn test_children_for_backend(backend: Arc) where // G -> A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + 
block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -228,7 +229,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); // A2 -> A3 let a3 = client.new_block_at( @@ -236,7 +237,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a3.clone())).unwrap(); // A3 -> A4 let a4 = client.new_block_at( @@ -244,7 +245,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a4.clone())).unwrap(); // A4 -> A5 let a5 = client.new_block_at( @@ -252,7 +253,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a5.clone())).unwrap(); // A1 -> B2 let mut builder = client.new_block_at( @@ -268,7 +269,7 @@ pub fn test_children_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b2.clone())).unwrap(); // B2 -> B3 let b3 = client.new_block_at( @@ -276,7 +277,7 @@ pub fn test_children_for_backend(backend: Arc) where Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, b3.clone())).unwrap(); // B3 -> B4 let b4 = client.new_block_at( @@ -284,7 +285,7 @@ pub fn test_children_for_backend(backend: Arc) where 
Default::default(), false, ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4).unwrap(); + block_on(client.import(BlockOrigin::Own, b4)).unwrap(); // // B2 -> C3 let mut builder = client.new_block_at( @@ -300,7 +301,7 @@ pub fn test_children_for_backend(backend: Arc) where nonce: 1, }).unwrap(); let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, c3.clone())).unwrap(); // A1 -> D2 let mut builder = client.new_block_at( @@ -316,7 +317,7 @@ pub fn test_children_for_backend(backend: Arc) where nonce: 0, }).unwrap(); let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, d2.clone())).unwrap(); let genesis_hash = client.chain_info().genesis_hash; @@ -349,7 +350,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A1 let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); + block_on(client.import(BlockOrigin::Own, a1.clone())).unwrap(); // A1 -> A2 let a2 = client.new_block_at( @@ -357,7 +358,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A3 let a3 = client.new_block_at( @@ -365,7 +366,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A4 let a4 = client.new_block_at( @@ -373,7 +374,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc A5 let a5 = client.new_block_at( @@ -381,7 +382,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc B2 let mut builder = client.new_block_at( @@ -397,7 +398,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc B3 let b3 = client.new_block_at( @@ -405,7 +406,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc B4 let b4 = client.new_block_at( @@ -413,7 +414,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc C3 let 
mut builder = client.new_block_at( @@ -429,7 +430,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc D2 let mut builder = client.new_block_at( @@ -445,7 +446,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc Err(InvalidTransaction::BadProof.into()), + Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), Extrinsic::ChangesTrieConfigUpdate(new_config) => Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), @@ -515,6 +515,7 @@ impl frame_system::Config for Runtime { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_timestamp::Config for Runtime { @@ -1018,7 +1019,7 @@ cfg_if! { } fn do_trace_log() { - log::error!("Hey I'm runtime: {}", log::STATIC_MAX_LEVEL); + log::trace!("Hey I'm runtime: {}", log::STATIC_MAX_LEVEL); } } @@ -1261,7 +1262,7 @@ mod tests { (BlockId::Hash(hash), block) }; - client.import(BlockOrigin::Own, block).unwrap(); + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); // Allocation of 1024k while having ~2048k should succeed. 
let ret = client.runtime_api().vec_with_capacity(&new_block_id, 1048576); diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml index cac699854f828..9e1f9fee02189 100644 --- a/test-utils/test-runner/Cargo.toml +++ b/test-utils/test-runner/Cargo.toml @@ -43,19 +43,10 @@ sp-runtime-interface = { version = "3.0.0", path = "../../primitives/runtime-int # pallets frame-system = { version = "3.0.0", path = "../../frame/system" } -parity-scale-codec = "1.3.1" env_logger = "0.7.1" log = "0.4.8" -futures01 = { package = "futures", version = "0.1.29" } futures = { package = "futures", version = "0.3", features = ["compat"] } -rand = "0.7" tokio = { version = "0.2", features = ["full"] } -libp2p = "0.35.1" # Calling RPC jsonrpc-core = "15.1" -[dev-dependencies] -sc-finality-grandpa = { version = "0.9.0", path = "../../client/finality-grandpa" } -sc-consensus-babe = { version = "0.9.0", path = "../../client/consensus/babe" } -sp-consensus-babe = { version = "0.9.0", path = "../../primitives/consensus/babe" } -node-cli = { version = "2.0.0", path = "../../bin/node/cli" } \ No newline at end of file diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs index 87ec4336d9523..f76083d281724 100644 --- a/test-utils/test-runner/src/lib.rs +++ b/test-utils/test-runner/src/lib.rs @@ -228,7 +228,7 @@ use manual_seal::consensus::ConsensusDataProvider; use sc_executor::NativeExecutionDispatch; -use sc_service::{Configuration, TFullBackend, TFullClient, TaskManager}; +use sc_service::{Configuration, TFullBackend, TFullClient, TaskManager, TaskExecutor}; use sp_api::{ConstructRuntimeApi, TransactionFor}; use sp_consensus::{BlockImport, SelectChain}; use sp_inherents::InherentDataProviders; @@ -242,6 +242,7 @@ mod host_functions; pub use host_functions::*; pub use node::*; +pub use utils::*; /// Wrapper trait for concrete type required by this testing framework. 
pub trait ChainInfo: Sized { @@ -279,6 +280,9 @@ pub trait ChainInfo: Sized { /// Signed extras, this function is caled in an externalities provided environment. fn signed_extras(from: ::AccountId) -> Self::SignedExtras; + /// config factory + fn config(task_executor: TaskExecutor) -> Configuration; + /// Attempt to create client parts, including block import, /// select chain strategy and consensus data provider. fn create_client_parts( diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index 6965c6a804dbe..2e6fc97c582a0 100644 --- a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -24,12 +24,10 @@ use manual_seal::{run_manual_seal, EngineCommand, ManualSealParams}; use sc_cli::build_runtime; use sc_client_api::{ backend::{self, Backend}, CallExecutor, ExecutorProvider, - execution_extensions::ExecutionStrategies, }; use sc_service::{ - build_network, spawn_tasks, BuildNetworkParams, SpawnTasksParams, TFullBackend, - TFullCallExecutor, TFullClient, TaskManager, TaskType, ChainSpec, BasePath, - Configuration, DatabaseConfig, KeepBlocks, TransactionStorageMode, config::KeystoreConfig, + build_network, spawn_tasks, BuildNetworkParams, SpawnTasksParams, + TFullBackend, TFullCallExecutor, TFullClient, TaskManager, TaskType, }; use sc_transaction_pool::BasicPool; use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata, OverlayedChanges, StorageTransactionCache}; @@ -45,13 +43,8 @@ use sp_state_machine::Ext; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; use sp_transaction_pool::TransactionPool; -pub use crate::utils::{logger, base_path}; -use crate::ChainInfo; +use crate::{ChainInfo, utils::logger}; use log::LevelFilter; -use sp_keyring::sr25519::Keyring::Alice; -use sc_network::{multiaddr, config::{NetworkConfiguration, TransportConfig, Role}}; -use sc_informant::OutputFormat; -use sc_executor::WasmExecutionMethod; /// This holds a reference to a running node on another thread, /// the node 
process is dropped when this struct is dropped @@ -91,12 +84,6 @@ pub struct Node { pub struct NodeConfig { /// A set of log targets you'd like to enable/disbale pub log_targets: Vec<(&'static str, LevelFilter)>, - - /// ChainSpec for the runtime - pub chain_spec: Box, - - /// wasm execution strategies. - pub execution_strategies: ExecutionStrategies, } type EventRecord = frame_system::EventRecord<::Event, ::Hash>; @@ -114,100 +101,20 @@ impl Node { + BlockBuilder + ApiExt as Backend>::State>, { - let NodeConfig { log_targets, mut chain_spec, execution_strategies } = node_config; + let NodeConfig { log_targets, } = node_config; let tokio_runtime = build_runtime().unwrap(); - - // unbounded logs, should be fine, test is shortlived. - let (log_sink, log_stream) = mpsc::unbounded(); - - logger(log_targets, tokio_runtime.handle().clone(), log_sink); let runtime_handle = tokio_runtime.handle().clone(); - let task_executor = move |fut, task_type| match task_type { TaskType::Async => runtime_handle.spawn(fut).map(drop), TaskType::Blocking => runtime_handle .spawn_blocking(move || futures::executor::block_on(fut)) .map(drop), }; + // unbounded logs, should be fine, test is shortlived. 
+ let (log_sink, log_stream) = mpsc::unbounded(); - let base_path = if let Some(base) = base_path() { - BasePath::new(base) - } else { - BasePath::new_temp_dir().expect("couldn't create a temp dir") - }; - let root_path = base_path.path().to_path_buf().join("chains").join(chain_spec.id()); - - let key_seed = Alice.to_seed(); - let storage = chain_spec - .as_storage_builder() - .build_storage() - .expect("could not build storage"); - - chain_spec.set_storage(storage); - - let mut network_config = NetworkConfiguration::new( - format!("Test Node for: {}", key_seed), - "network/test/0.1", - Default::default(), - None, - ); - let informant_output_format = OutputFormat { enable_color: false }; - - network_config.allow_non_globals_in_dht = true; - - network_config - .listen_addresses - .push(multiaddr::Protocol::Memory(rand::random()).into()); - - network_config.transport = TransportConfig::MemoryOnly; - - let config = Configuration { - impl_name: "test-node".to_string(), - impl_version: "0.1".to_string(), - role: Role::Authority, - task_executor: task_executor.into(), - transaction_pool: Default::default(), - network: network_config, - keystore: KeystoreConfig::Path { - path: root_path.join("key"), - password: None, - }, - database: DatabaseConfig::RocksDb { - path: root_path.join("db"), - cache_size: 128, - }, - state_cache_size: 16777216, - state_cache_child_ratio: None, - chain_spec, - wasm_method: WasmExecutionMethod::Interpreted, - execution_strategies, - rpc_http: None, - rpc_ws: None, - rpc_ipc: None, - rpc_ws_max_connections: None, - rpc_cors: None, - rpc_methods: Default::default(), - prometheus_config: None, - telemetry_endpoints: None, - telemetry_external_transport: None, - default_heap_pages: None, - offchain_worker: Default::default(), - force_authoring: false, - disable_grandpa: false, - dev_key_seed: Some(key_seed), - tracing_targets: None, - tracing_receiver: Default::default(), - max_runtime_instances: 8, - announce_block: true, - base_path: 
Some(base_path), - wasm_runtime_overrides: None, - informant_output_format, - disable_log_reloading: false, - keystore_remote: None, - keep_blocks: KeepBlocks::All, - state_pruning: Default::default(), - transaction_storage: TransactionStorageMode::BlockBody, - }; + logger(log_targets, tokio_runtime.handle().clone(), log_sink); + let config = T::config(task_executor.into()); let ( client, @@ -448,7 +355,7 @@ impl Node { /// Revert all blocks added since creation of the node. pub fn clean(&self) { // if a db path was specified, revert all blocks we've added - if let Some(_) = base_path() { + if let Some(_) = std::env::var("DB_BASE_PATH").ok() { let diff = self.client.info().best_number - self.initial_block_number; self.revert_blocks(diff); } diff --git a/test-utils/test-runner/src/utils.rs b/test-utils/test-runner/src/utils.rs index 7cd512e2d4869..d8ab3860f28a8 100644 --- a/test-utils/test-runner/src/utils.rs +++ b/test-utils/test-runner/src/utils.rs @@ -20,10 +20,21 @@ use futures::{Sink, SinkExt}; use std::fmt; use std::io::Write; use log::LevelFilter; +use sc_service::{BasePath, ChainSpec, Configuration, TaskExecutor, DatabaseConfig, KeepBlocks, TransactionStorageMode}; +use sp_keyring::sr25519::Keyring::Alice; +use sc_network::{multiaddr, config::{NetworkConfiguration, TransportConfig, Role}}; +use sc_informant::OutputFormat; +use sc_service::config::KeystoreConfig; +use sc_executor::WasmExecutionMethod; +use sc_client_api::execution_extensions::ExecutionStrategies; /// Base db path gotten from env -pub fn base_path() -> Option { - std::env::var("DB_BASE_PATH").ok() +pub fn base_path() -> BasePath { + if let Some(base) = std::env::var("DB_BASE_PATH").ok() { + BasePath::new(base) + } else { + BasePath::new_temp_dir().expect("couldn't create a temp dir") + } } /// Builds the global logger. @@ -54,3 +65,86 @@ where } let _ = builder.is_test(true).try_init(); } + +/// Produces a default configuration object, suitable for use with most set ups. 
+pub fn default_config(task_executor: TaskExecutor, mut chain_spec: Box) -> Configuration { + let base_path = base_path(); + let root_path = base_path.path().to_path_buf().join("chains").join(chain_spec.id()); + + let storage = chain_spec + .as_storage_builder() + .build_storage() + .expect("could not build storage"); + + chain_spec.set_storage(storage); + let key_seed = Alice.to_seed(); + + let mut network_config = NetworkConfiguration::new( + format!("Test Node for: {}", key_seed), + "network/test/0.1", + Default::default(), + None, + ); + let informant_output_format = OutputFormat { enable_color: false }; + network_config.allow_non_globals_in_dht = true; + + network_config + .listen_addresses + .push(multiaddr::Protocol::Memory(0).into()); + + network_config.transport = TransportConfig::MemoryOnly; + + Configuration { + impl_name: "test-node".to_string(), + impl_version: "0.1".to_string(), + role: Role::Authority, + task_executor: task_executor.into(), + transaction_pool: Default::default(), + network: network_config, + keystore: KeystoreConfig::Path { + path: root_path.join("key"), + password: None, + }, + database: DatabaseConfig::RocksDb { + path: root_path.join("db"), + cache_size: 128, + }, + state_cache_size: 16777216, + state_cache_child_ratio: None, + chain_spec, + wasm_method: WasmExecutionMethod::Interpreted, + execution_strategies: ExecutionStrategies { + syncing: sc_client_api::ExecutionStrategy::AlwaysWasm, + importing: sc_client_api::ExecutionStrategy::AlwaysWasm, + block_construction: sc_client_api::ExecutionStrategy::AlwaysWasm, + offchain_worker: sc_client_api::ExecutionStrategy::AlwaysWasm, + other: sc_client_api::ExecutionStrategy::AlwaysWasm, + }, + rpc_http: None, + rpc_ws: None, + rpc_ipc: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + prometheus_config: None, + telemetry_endpoints: None, + telemetry_external_transport: None, + default_heap_pages: None, + offchain_worker: Default::default(), + 
force_authoring: false, + disable_grandpa: false, + dev_key_seed: Some(key_seed), + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: Some(base_path), + wasm_runtime_overrides: None, + informant_output_format, + disable_log_reloading: false, + keystore_remote: None, + keep_blocks: KeepBlocks::All, + state_pruning: Default::default(), + transaction_storage: TransactionStorageMode::BlockBody, + } +} diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 3a11df62dc254..31403a5e6fa96 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -16,10 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] futures = { version = "0.3", features = ["compat"] } futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.28", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.28.1", features = ["websocket"] } console_error_panic_hook = "0.1.6" js-sys = "0.3.34" -wasm-bindgen = "0.2.57" +wasm-bindgen = "0.2.73" wasm-bindgen-futures = "0.4.18" kvdb-web = "0.9.0" sp-database = { version = "3.0.0", path = "../../primitives/database" } diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 146b0aa841333..80d95d1c86dcf 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -63,7 +63,7 @@ impl BenchmarkCmd { let genesis_storage = spec.build_storage()?; let mut changes = Default::default(); let cache_size = Some(self.database_cache_size as usize); - let state = BenchmarkingState::::new(genesis_storage, cache_size)?; + let state = BenchmarkingState::::new(genesis_storage, cache_size, self.record_proof)?; let executor = NativeExecutor::::new( wasm_method, self.heap_pages, @@ -126,19 +126,20 @@ impl BenchmarkCmd { // Print the table header batch.results[0].components.iter().for_each(|param| print!("{:?},", param.0)); - 
print!("extrinsic_time,storage_root_time,reads,repeat_reads,writes,repeat_writes\n"); + print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n"); // Print the values batch.results.iter().for_each(|result| { let parameters = &result.components; parameters.iter().for_each(|param| print!("{:?},", param.1)); // Print extrinsic time and storage root time - print!("{:?},{:?},{:?},{:?},{:?},{:?}\n", + print!("{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n", result.extrinsic_time, result.storage_root_time, result.reads, result.repeat_reads, result.writes, result.repeat_writes, + result.proof_size, ); }); diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 6784b1ecabf41..9862a5a5b82a7 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -97,6 +97,10 @@ pub struct BenchmarkCmd { #[structopt(long)] pub extra: bool, + /// Estimate PoV size. + #[structopt(long)] + pub record_proof: bool, + #[allow(missing_docs)] #[structopt(flatten)] pub shared_params: sc_cli::SharedParams, diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index aeed6ea1c9a83..6fd6cc6eefdc6 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -421,6 +421,7 @@ mod test { repeat_reads: 0, writes: (base + slope * i).into(), repeat_writes: 0, + proof_size: 0, } ) } diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index cb37119edf0b2..1fdf4e4cd9a97 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -16,6 +16,7 @@ sc-cli = { version = "0.9.0", path = "../../../client/cli" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } structopt = "0.3.8" frame-system = { version = "3.0.0", path = "../../../frame/system" } +frame-support = { version = "3.0.0", 
path = "../../../frame/support" } [dev-dependencies] diff --git a/utils/frame/frame-utilities-cli/src/lib.rs b/utils/frame/frame-utilities-cli/src/lib.rs index 2d6bf4ab9d8f1..83f3e9ea00d45 100644 --- a/utils/frame/frame-utilities-cli/src/lib.rs +++ b/utils/frame/frame-utilities-cli/src/lib.rs @@ -17,7 +17,7 @@ //! frame-system CLI utilities -mod module_id; +mod pallet_id; -pub use module_id::ModuleIdCmd; +pub use pallet_id::PalletIdCmd; diff --git a/utils/frame/frame-utilities-cli/src/module_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs similarity index 88% rename from utils/frame/frame-utilities-cli/src/module_id.rs rename to utils/frame/frame-utilities-cli/src/pallet_id.rs index 187c2de1dd6d5..09304979cb09f 100644 --- a/utils/frame/frame-utilities-cli/src/module_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -15,25 +15,25 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Implementation of the `moduleid` subcommand +//! 
Implementation of the `palletid` subcommand use sc_cli::{ Error, utils::print_from_uri, CryptoSchemeFlag, OutputTypeFlag, KeystoreParams, with_crypto_scheme, }; -use sp_runtime::ModuleId; use sp_runtime::traits::AccountIdConversion; use sp_core::crypto::{Ss58Codec, Ss58AddressFormat}; use std::convert::{TryInto, TryFrom}; use structopt::StructOpt; +use frame_support::PalletId; -/// The `moduleid` command +/// The `palletid` command #[derive(Debug, StructOpt)] #[structopt( - name = "moduleid", + name = "palletid", about = "Inspect a module ID address" )] -pub struct ModuleIdCmd { +pub struct PalletIdCmd { /// The module ID used to derive the account id: String, @@ -60,7 +60,7 @@ pub struct ModuleIdCmd { pub keystore_params: KeystoreParams, } -impl ModuleIdCmd { +impl PalletIdCmd { /// runs the command pub fn run(&self) -> Result<(), Error> where @@ -74,9 +74,9 @@ impl ModuleIdCmd { let id_fixed_array: [u8; 8] = self.id.as_bytes() .try_into() - .map_err(|_| "Cannot convert argument to moduleid: argument should be 8-character string")?; + .map_err(|_| "Cannot convert argument to palletid: argument should be 8-character string")?; - let account_id: R::AccountId = ModuleId(id_fixed_array).into_account(); + let account_id: R::AccountId = PalletId(id_fixed_array).into_account(); with_crypto_scheme!( self.crypto_scheme.scheme, diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index b8bee6380006a..7d372e8648eea 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -13,10 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee-http-client = { version = "0.2.0-alpha", default-features = false, features = ["tokio02"] } -# Needed by jsonrpsee-proc-macros: https://github.com/paritytech/jsonrpsee/issues/214 -jsonrpsee-types = "0.2.0-alpha.2" -jsonrpsee-proc-macros = "0.2.0-alpha.2" +jsonrpsee-http-client = { version = 
"=0.2.0-alpha.6", default-features = false, features = ["tokio02"] } +jsonrpsee-proc-macros = "=0.2.0-alpha.6" hex-literal = "0.3.1" env_logger = "0.8.2" diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 8d142100ec345..3ec16ea1982cd 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -113,7 +113,7 @@ use sp_core::{ storage::{StorageKey, StorageData}, }; use codec::{Encode, Decode}; -use jsonrpsee_http_client::{HttpClient, HttpConfig}; +use jsonrpsee_http_client::{HttpClient, HttpClientBuilder}; use sp_runtime::traits::Block as BlockT; @@ -173,7 +173,7 @@ impl Default for OnlineConfig { impl OnlineConfig { /// Return a new http rpc client. fn rpc(&self) -> HttpClient { - HttpClient::new(&self.uri, HttpConfig { max_request_body_size: u32::MAX }) + HttpClientBuilder::default().max_request_body_size(u32::MAX).build(&self.uri) .expect("valid HTTP url; qed") } } @@ -181,23 +181,19 @@ impl OnlineConfig { /// Configuration of the state snapshot. #[derive(Clone)] pub struct SnapshotConfig { - // TODO: I could mix these two into one filed, but I think separate is better bc one can be - // configurable while one not. - /// File name. - pub name: String, - /// Base directory. - pub directory: String, + /// The path to the snapshot file. 
+ pub path: PathBuf, } -impl Default for SnapshotConfig { - fn default() -> Self { - Self { name: "SNAPSHOT".into(), directory: ".".into() } +impl SnapshotConfig { + pub fn new>(path: P) -> Self { + Self { path: path.into() } } } -impl SnapshotConfig { - fn path(&self) -> PathBuf { - Path::new(&self.directory).join(self.name.clone()) +impl Default for SnapshotConfig { + fn default() -> Self { + Self { path: Path::new("SNAPSHOT").into() } } } @@ -319,12 +315,12 @@ impl Builder { async fn pre_build(mut self) -> Result, &'static str> { let mut base_kv = match self.mode.clone() { - Mode::Offline(config) => self.load_state_snapshot(&config.state_snapshot.path())?, + Mode::Offline(config) => self.load_state_snapshot(&config.state_snapshot.path)?, Mode::Online(config) => { self.init_remote_client().await?; let kp = self.load_remote().await?; if let Some(c) = config.state_snapshot { - self.save_state_snapshot(&kp, &c.path())?; + self.save_state_snapshot(&kp, &c.path)?; } kp } @@ -399,7 +395,7 @@ mod tests { init_logger(); Builder::::new() .mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig { name: "test_data/proxy_test".into(), ..Default::default() }, + state_snapshot: SnapshotConfig { path: "test_data/proxy_test".into() }, })) .build() .await diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 57c0cda9cca3a..bbc51a28a59cd 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -207,10 +207,7 @@ where let call_data = account.encode(); let future_best_header = future_best_header .and_then(move |maybe_best_header| ready( - match maybe_best_header { - Some(best_header) => Ok(best_header), - None => Err(ClientError::UnknownBlock(format!("{}", best_hash))), - } + maybe_best_header.ok_or_else(|| { ClientError::UnknownBlock(format!("{}", best_hash)) }) )); let future_nonce = future_best_header.and_then(move |best_header| fetcher.remote_call(RemoteCallRequest { diff --git 
a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index ff8c5c08ec5b7..4d265c0995973 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -67,15 +67,14 @@ pub struct TryRuntimeCmd { pub enum State { /// Use a state snapshot as state to run the migration. Snap { - #[structopt(flatten)] - snapshot_path: SnapshotPath, + snapshot_path: PathBuf, }, /// Use a live chain to run the migration. Live { /// An optional state snapshot file to WRITE to. Not written if set to `None`. #[structopt(short, long)] - snapshot_path: Option, + snapshot_path: Option, /// The block hash at which to connect. /// Will be latest finalized head if not provided. @@ -118,31 +117,6 @@ fn parse_url(s: &str) -> Result { } } -#[derive(Debug, structopt::StructOpt)] -pub struct SnapshotPath { - /// The directory of the state snapshot. - #[structopt(short, long, default_value = ".")] - directory: String, - - /// The file name of the state snapshot. 
- #[structopt(default_value = "SNAPSHOT")] - file_name: String, -} - -impl FromStr for SnapshotPath { - type Err = &'static str; - fn from_str(s: &str) -> Result { - let p: PathBuf = s.parse().map_err(|_| "invalid path")?; - let parent = p.parent(); - let file_name = p.file_name(); - - file_name.and_then(|file_name| Some(Self { - directory: parent.map(|p| p.to_string_lossy().into()).unwrap_or(".".to_string()), - file_name: file_name.to_string_lossy().into() - })).ok_or("invalid path") - } -} - impl TryRuntimeCmd { pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> where @@ -182,12 +156,8 @@ impl TryRuntimeCmd { use remote_externalities::{Builder, Mode, SnapshotConfig, OfflineConfig, OnlineConfig}; let builder = match &self.state { State::Snap { snapshot_path } => { - let SnapshotPath { directory, file_name } = snapshot_path; Builder::::new().mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig { - name: file_name.into(), - directory: directory.into(), - }, + state_snapshot: SnapshotConfig::new(snapshot_path), })) }, State::Live { @@ -197,15 +167,10 @@ impl TryRuntimeCmd { modules } => Builder::::new().mode(Mode::Online(OnlineConfig { uri: url.into(), - state_snapshot: snapshot_path.as_ref().map(|c| SnapshotConfig { - name: c.file_name.clone(), - directory: c.directory.clone(), - }), + state_snapshot: snapshot_path.as_ref().map(SnapshotConfig::new), modules: modules.clone().unwrap_or_default(), - at: match block_at { - Some(b) => Some(b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))?), - None => None, - }, + at: block_at.as_ref() + .map(|b| b.parse().map_err(|e| format!("Could not parse hash: {:?}", e))).transpose()?, ..Default::default() })), }; diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index c9d165ce8a140..09c86ca76cc18 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -14,10 +14,11 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] build-helper = 
"0.1.1" -cargo_metadata = "0.12.0" +cargo_metadata = "0.13.1" tempfile = "3.1.0" toml = "0.5.4" walkdir = "2.3.1" wasm-gc-api = "0.1.11" atty = "0.2.13" ansi_term = "0.12.1" +sp-maybe-compressed-blob = { version = "3.0.0", path = "../../primitives/maybe-compressed-blob" } diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs index bfbc4030adfd1..9e8216f04fedb 100644 --- a/utils/wasm-builder/src/builder.rs +++ b/utils/wasm-builder/src/builder.rs @@ -217,7 +217,7 @@ fn generate_rerun_if_changed_instructions() { /// `project_cargo_toml` - The path to the `Cargo.toml` of the project that should be built. /// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. /// `features_to_enable` - Features that should be enabled for the project. -/// `wasm_binary_name` - The optional wasm binary name that is extended with `.compact.wasm`. +/// `wasm_binary_name` - The optional wasm binary name that is extended with `.compact.compressed.wasm`. /// If `None`, the project name will be used. 
fn build_project( file_name: PathBuf, diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 0392546575446..58161f53113fe 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -109,12 +109,12 @@ pub(crate) fn create_and_compile( project_cargo_toml, &wasm_workspace, &crate_metadata, - &crate_metadata.workspace_root, + crate_metadata.workspace_root.as_ref(), features_to_enable, ); build_project(&project, default_rustflags, cargo_cmd); - let (wasm_binary, bloaty) = compact_wasm_file( + let (wasm_binary, wasm_binary_compressed, bloaty) = compact_wasm_file( &project, project_cargo_toml, wasm_binary_name, @@ -124,9 +124,13 @@ pub(crate) fn create_and_compile( copy_wasm_to_target_directory(project_cargo_toml, wasm_binary) ); + wasm_binary_compressed.as_ref().map(|wasm_binary_compressed| + copy_wasm_to_target_directory(project_cargo_toml, wasm_binary_compressed) + ); + generate_rerun_if_changed_instructions(project_cargo_toml, &project, &wasm_workspace); - (wasm_binary, bloaty) + (wasm_binary_compressed.or(wasm_binary), bloaty) } /// Find the `Cargo.lock` relative to the `OUT_DIR` environment variable. 
@@ -254,6 +258,7 @@ fn create_project_cargo_toml( package.insert("name".into(), format!("{}-wasm", crate_name).into()); package.insert("version".into(), "1.0.0".into()); package.insert("edition".into(), "2018".into()); + package.insert("resolver".into(), "2".into()); wasm_workspace_toml.insert("package".into(), package.into()); @@ -412,7 +417,7 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), ); - build_cmd.args(&["-Zfeatures=build_dep", "rustc", "--target=wasm32-unknown-unknown"]) + build_cmd.args(&["rustc", "--target=wasm32-unknown-unknown"]) .arg(format!("--manifest-path={}", manifest_path.display())) .env("RUSTFLAGS", rustflags) // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). @@ -441,12 +446,12 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman } } -/// Compact the WASM binary using `wasm-gc`. Returns the path to the bloaty WASM binary. +/// Compact the WASM binary using `wasm-gc` and compress it using zstd. 
fn compact_wasm_file( project: &Path, cargo_manifest: &Path, wasm_binary_name: Option, -) -> (Option, WasmBinaryBloaty) { +) -> (Option, Option, WasmBinaryBloaty) { let is_release_build = is_release_build(); let target = if is_release_build { "release" } else { "debug" }; let default_wasm_binary_name = get_wasm_binary_name(cargo_manifest); @@ -468,6 +473,25 @@ fn compact_wasm_file( None }; + let wasm_compact_compressed_file = wasm_compact_file.as_ref() + .and_then(|compact_binary| { + let file_name = wasm_binary_name.clone() + .unwrap_or_else(|| default_wasm_binary_name.clone()); + + let wasm_compact_compressed_file = project.join( + format!( + "{}.compact.compressed.wasm", + file_name, + ) + ); + + if compress_wasm(&compact_binary.0, &wasm_compact_compressed_file) { + Some(WasmBinary(wasm_compact_compressed_file)) + } else { + None + } + }); + let bloaty_file_name = if let Some(name) = wasm_binary_name { format!("{}.wasm", name) } else { @@ -477,7 +501,36 @@ fn compact_wasm_file( let bloaty_file = project.join(bloaty_file_name); fs::copy(wasm_file, &bloaty_file).expect("Copying the bloaty file to the project dir."); - (wasm_compact_file, WasmBinaryBloaty(bloaty_file)) + ( + wasm_compact_file, + wasm_compact_compressed_file, + WasmBinaryBloaty(bloaty_file), + ) +} + +fn compress_wasm( + wasm_binary_path: &Path, + compressed_binary_out_path: &Path, +) -> bool { + use sp_maybe_compressed_blob::CODE_BLOB_BOMB_LIMIT; + + let data = fs::read(wasm_binary_path).expect("Failed to read WASM binary"); + if let Some(compressed) = sp_maybe_compressed_blob::compress( + &data, + CODE_BLOB_BOMB_LIMIT, + ) { + fs::write(compressed_binary_out_path, &compressed[..]) + .expect("Failed to write WASM binary"); + + true + } else { + println!( + "cargo:warning=Writing uncompressed wasm. Exceeded maximum size {}", + CODE_BLOB_BOMB_LIMIT, + ); + + false + } } /// Custom wrapper for a [`cargo_metadata::Package`] to store it in

, - /// The SelectChain Strategy - pub select_chain: SC, -+ /// A copy of the chain spec. -+ pub chain_spec: Box, - /// Whether to deny unsafe calls - pub deny_unsafe: DenyUnsafe, - /// BABE specific dependencies. -@@ -109,9 +112,8 @@ pub type IoHandler = jsonrpc_core::IoHandler; - pub fn create_full( - deps: FullDeps, - ) -> jsonrpc_core::IoHandler where -- C: ProvideRuntimeApi, -- C: HeaderBackend + HeaderMetadata + 'static, -- C: Send + Sync + 'static, -+ C: ProvideRuntimeApi + HeaderBackend + AuxStore + -+ HeaderMetadata + Sync + Send + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, -@@ -131,6 +133,7 @@ pub fn create_full( - client, - pool, - select_chain, -+ chain_spec, - deny_unsafe, - babe, - grandpa, -@@ -164,8 +167,8 @@ pub fn create_full( - io.extend_with( - sc_consensus_babe_rpc::BabeApi::to_delegate( - BabeRpcHandler::new( -- client, -- shared_epoch_changes, -+ client.clone(), -+ shared_epoch_changes.clone(), - keystore, - babe_config, - select_chain, -@@ -176,7 +179,7 @@ pub fn create_full( - io.extend_with( - sc_finality_grandpa_rpc::GrandpaApi::to_delegate( - GrandpaRpcHandler::new( -- shared_authority_set, -+ shared_authority_set.clone(), - shared_voter_state, - justification_stream, - subscription_executor, - -``` - -and add the new service: - -```diff= ---- a/bin/node/rpc/src/lib.rs -+++ b/bin/node/rpc/src/lib.rs -@@ -185,6 +188,18 @@ pub fn create_full( - ) - ); - -+ io.extend_with( -+ sc_sync_state_rpc::SyncStateRpcApi::to_delegate( -+ sc_sync_state_rpc::SyncStateRpcHandler::new( -+ chain_spec, -+ client, -+ shared_authority_set, -+ shared_epoch_changes, -+ deny_unsafe, -+ ) -+ ) -+ ); -+ - io - } -``` - -##### Telemetry - -The telemetry subsystem has seen a few fixes and refactorings to allow for a more flexible handling, in particular in regards to parachains. 
Most notably `sc_service::spawn_tasks` now returns the `telemetry_connection_notifier` as the second member of the tuple, (`let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(`), which should be passed to `telemetry_on_connect` of `new_full_base` now: `telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()),` (see the service-section below for a full diff). - -On the browser-side, this complicates setup a tiny bit, yet not terribly. Instead of `init_console_log`, we now use `init_logging_and_telemetry` and need to make sure we spawn the runner for its handle at the end (the other changes are formatting and cosmetics): - -```diff ---- a/bin/node/cli/src/browser.rs -+++ b/bin/node/cli/src/browser.rs -@@ -21,9 +21,8 @@ use log::info; - use wasm_bindgen::prelude::*; - use browser_utils::{ - Client, -- browser_configuration, set_console_error_panic_hook, init_console_log, -+ browser_configuration, init_logging_and_telemetry, set_console_error_panic_hook, - }; --use std::str::FromStr; - - /// Starts the client. 
- #[wasm_bindgen] -@@ -33,29 +32,38 @@ pub async fn start_client(chain_spec: Option, log_level: String) -> Resu - .map_err(|err| JsValue::from_str(&err.to_string())) - } - --async fn start_inner(chain_spec: Option, log_level: String) -> Result> { -+async fn start_inner( -+ chain_spec: Option, -+ log_directives: String, -+) -> Result> { - set_console_error_panic_hook(); -- init_console_log(log::Level::from_str(&log_level)?)?; -+ let telemetry_worker = init_logging_and_telemetry(&log_directives)?; - let chain_spec = match chain_spec { - Some(chain_spec) => ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) - .map_err(|e| format!("{:?}", e))?, - None => crate::chain_spec::development_config(), - }; - -- let config = browser_configuration(chain_spec).await?; -+ let telemetry_handle = telemetry_worker.handle(); -+ let config = browser_configuration( -+ chain_spec, -+ Some(telemetry_handle), -+ ).await?; - - info!("Substrate browser node"); - info!("✌️ version {}", config.impl_version); -- info!("❤️ by Parity Technologies, 2017-2020"); -+ info!("❤️ by Parity Technologies, 2017-2021"); - info!("📋 Chain specification: {}", config.chain_spec.name()); -- info!("🏷 Node name: {}", config.network.node_name); -+ info!("🏷 Node name: {}", config.network.node_name); - info!("👤 Role: {:?}", config.role); - - // Create the service. This is the most heavy initialization step. 
- let (task_manager, rpc_handlers) = - crate::service::new_light_base(config) -- .map(|(components, rpc_handlers, _, _, _)| (components, rpc_handlers)) -+ .map(|(components, rpc_handlers, _, _, _, _)| (components, rpc_handlers)) - .map_err(|e| format!("{:?}", e))?; - -+ task_manager.spawn_handle().spawn("telemetry", telemetry_worker.run()); -+ - Ok(browser_utils::start_client(task_manager, rpc_handlers)) - } - ``` - -##### Async & Remote Keystore support - -In order to allow for remote-keystores, the keystore-subsystem has been reworked to support async operations and generally refactored to not provide the keys itself but only sign on request. This allows for remote-keystore to never hand out keys and thus to operate any substrate-based node in a manner without ever having the private keys in the local system memory. - -There are some operations, however, that the keystore must be local for performance reasons and for which a remote keystore won't work (in particular around parachains). As such, the keystore has both a slot for remote but also always a local instance, where some operations hard bind to the local variant, while most subsystems just ask the generic keystore which prefers a remote signer if given. To reflect this change, `sc_service::new_full_parts` now returns a `KeystoreContainer` rather than the keystore, and the other subsystems (e.g. `sc_service::PartialComponents`) expect to be given that. 
- -###### on RPC: - -This has most visible changes for the rpc, where we are switching from the previous `KeyStorePtr` to the new `SyncCryptoStorePtr`: - -```diff - ---- a/bin/node/rpc/src/lib.rs -+++ b/bin/node/rpc/src/lib.rs -@@ -32,6 +32,7 @@ - - use std::sync::Arc; - -+use sp_keystore::SyncCryptoStorePtr; - use node_primitives::{Block, BlockNumber, AccountId, Index, Balance, Hash}; - use sc_consensus_babe::{Config, Epoch}; - use sc_consensus_babe_rpc::BabeRpcHandler; -@@ -40,7 +41,6 @@ use sc_finality_grandpa::{ - SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream - }; - use sc_finality_grandpa_rpc::GrandpaRpcHandler; --use sc_keystore::KeyStorePtr; - pub use sc_rpc_api::DenyUnsafe; - use sp_api::ProvideRuntimeApi; - use sp_block_builder::BlockBuilder; - pub struct LightDeps { -@@ -69,7 +70,7 @@ pub struct BabeDeps { - /// BABE pending epoch changes. - pub shared_epoch_changes: SharedEpochChanges, - /// The keystore that manages the keys of the node. -- pub keystore: KeyStorePtr, -+ pub keystore: SyncCryptoStorePtr, - } - -``` - -##### GRANDPA - -As already in the changelog, a few things significant things have changed in regards to GRANDPA: the finality tracker has been replaced, an RPC command has been added and WARP-sync-support for faster light client startup has been implemented. All this means we have to do a few changes to our GRANDPA setup procedures in the client. - -First and foremost, grandpa internalised a few aspects, and thus `new_partial` doesn't expect a tuple but only the `grandpa::SharedVoterState` as input now, and unpacking that again later is not needed anymore either. On the opposite side `grandpa::FinalityProofProvider::new_for_service` now requires the `Some(shared_authority_set)` to be passed as a new third parameter. 
This set also becomes relevant when adding warp-sync-support, which is added as an extra-protocol-layer to the networking as: -```diff= - -+ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); -+ -+ #[cfg(feature = "cli")] -+ config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( -+ &config, task_manager.spawn_handle(), backend.clone(), -+ )); -``` - -As these changes pull through the enitrety of `cli/src/service.rs`, we recommend looking at the final diff below for guidance. - -##### In a nutshell - -Altogether this accumulates to the following diff for `node/cli/src/service.rs`. If you want these features and have modified your chain you should probably try to apply these patches: - - -```diff= ---- a/bin/node/cli/src/service.rs -+++ b/bin/node/cli/src/service.rs -@@ -22,11 +22,10 @@ - - use std::sync::Arc; - use sc_consensus_babe; --use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; - use node_primitives::Block; - use node_runtime::RuntimeApi; - use sc_service::{ -- config::{Role, Configuration}, error::{Error as ServiceError}, -+ config::{Configuration}, error::{Error as ServiceError}, - RpcHandlers, TaskManager, - }; - use sp_inherents::InherentDataProviders; -@@ -34,8 +33,8 @@ use sc_network::{Event, NetworkService}; - use sp_runtime::traits::Block as BlockT; - use futures::prelude::*; - use sc_client_api::{ExecutorProvider, RemoteBackend}; --use sp_core::traits::BareCryptoStorePtr; - use node_executor::Executor; -+use sc_telemetry::TelemetryConnectionNotifier; - - type FullClient = sc_service::TFullClient; - type FullBackend = sc_service::TFullBackend; -@@ -58,13 +57,10 @@ pub fn new_partial(config: &Configuration) -> Result, - sc_consensus_babe::BabeLink, - ), -- ( -- grandpa::SharedVoterState, -- Arc>, -- ), -+ grandpa::SharedVoterState, - ) - >, ServiceError> { -- let (client, backend, keystore, task_manager) = -+ let (client, backend, keystore_container, 
task_manager) = - sc_service::new_full_parts::(&config)?; - let client = Arc::new(client); - -@@ -94,7 +90,6 @@ pub fn new_partial(config: &Configuration) -> Result Result Result Result, - &sc_consensus_babe::BabeLink, - ) - ) -> Result { - let sc_service::PartialComponents { -- client, backend, mut task_manager, import_queue, keystore, select_chain, transaction_pool, -+ client, -+ backend, -+ mut task_manager, -+ import_queue, -+ keystore_container, -+ select_chain, -+ transaction_pool, - inherent_data_providers, - other: (rpc_extensions_builder, import_setup, rpc_setup), - } = new_partial(&config)?; - -- let (shared_voter_state, finality_proof_provider) = rpc_setup; -+ let shared_voter_state = rpc_setup; -+ -+ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); -+ -+ #[cfg(feature = "cli")] -+ config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( -+ &config, task_manager.spawn_handle(), backend.clone(), -+ )); - - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { -@@ -191,8 +209,6 @@ pub fn new_full_base( - import_queue, - on_demand: None, - block_announce_validator_builder: None, -- finality_proof_request_builder: None, -- finality_proof_provider: Some(finality_proof_provider.clone()), - })?; - - if config.offchain_worker.enabled { -@@ -203,26 +219,28 @@ pub fn new_full_base( - - let role = config.role.clone(); - let force_authoring = config.force_authoring; -+ let backoff_authoring_blocks = -+ Some(sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging::default()); - let name = config.network.node_name.clone(); - let enable_grandpa = !config.disable_grandpa; - let prometheus_registry = config.prometheus_registry().cloned(); -- let telemetry_connection_sinks = sc_service::TelemetryConnectionSinks::default(); - -- sc_service::spawn_tasks(sc_service::SpawnTasksParams { -- config, -- backend: 
backend.clone(), -- client: client.clone(), -- keystore: keystore.clone(), -- network: network.clone(), -- rpc_extensions_builder: Box::new(rpc_extensions_builder), -- transaction_pool: transaction_pool.clone(), -- task_manager: &mut task_manager, -- on_demand: None, -- remote_blockchain: None, -- telemetry_connection_sinks: telemetry_connection_sinks.clone(), -- network_status_sinks: network_status_sinks.clone(), -- system_rpc_tx, -- })?; -+ let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( -+ sc_service::SpawnTasksParams { -+ config, -+ backend: backend.clone(), -+ client: client.clone(), -+ keystore: keystore_container.sync_keystore(), -+ network: network.clone(), -+ rpc_extensions_builder: Box::new(rpc_extensions_builder), -+ transaction_pool: transaction_pool.clone(), -+ task_manager: &mut task_manager, -+ on_demand: None, -+ remote_blockchain: None, -+ network_status_sinks: network_status_sinks.clone(), -+ system_rpc_tx, -+ }, -+ )?; - - let (block_import, grandpa_link, babe_link) = import_setup; - -@@ -230,6 +248,7 @@ pub fn new_full_base( - - if let sc_service::config::Role::Authority { .. } = &role { - let proposer = sc_basic_authorship::ProposerFactory::new( -+ task_manager.spawn_handle(), - client.clone(), - transaction_pool.clone(), - prometheus_registry.as_ref(), -@@ -239,7 +258,7 @@ pub fn new_full_base( - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - - let babe_config = sc_consensus_babe::BabeParams { -- keystore: keystore.clone(), -+ keystore: keystore_container.sync_keystore(), - client: client.clone(), - select_chain, - env: proposer, -@@ -247,6 +266,7 @@ pub fn new_full_base( - sync_oracle: network.clone(), - inherent_data_providers: inherent_data_providers.clone(), - force_authoring, -+ backoff_authoring_blocks, - babe_link, - can_author_with, - }; -@@ -256,42 +276,30 @@ pub fn new_full_base( - } - - // Spawn authority discovery module. 
-- if matches!(role, Role::Authority{..} | Role::Sentry {..}) { -- let (sentries, authority_discovery_role) = match role { -- sc_service::config::Role::Authority { ref sentry_nodes } => ( -- sentry_nodes.clone(), -- sc_authority_discovery::Role::Authority ( -- keystore.clone(), -- ), -- ), -- sc_service::config::Role::Sentry {..} => ( -- vec![], -- sc_authority_discovery::Role::Sentry, -- ), -- _ => unreachable!("Due to outer matches! constraint; qed.") -- }; -- -+ if role.is_authority() { -+ let authority_discovery_role = sc_authority_discovery::Role::PublishAndDiscover( -+ keystore_container.keystore(), -+ ); - let dht_event_stream = network.event_stream("authority-discovery") - .filter_map(|e| async move { match e { - Event::Dht(e) => Some(e), - _ => None, -- }}).boxed(); -+ }}); - let (authority_discovery_worker, _service) = sc_authority_discovery::new_worker_and_service( - client.clone(), - network.clone(), -- sentries, -- dht_event_stream, -+ Box::pin(dht_event_stream), - authority_discovery_role, - prometheus_registry.clone(), - ); - -- task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker); -+ task_manager.spawn_handle().spawn("authority-discovery-worker", authority_discovery_worker.run()); - } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. 
- let keystore = if role.is_authority() { -- Some(keystore as BareCryptoStorePtr) -+ Some(keystore_container.sync_keystore()) - } else { - None - }; -@@ -317,8 +325,7 @@ pub fn new_full_base( - config, - link: grandpa_link, - network: network.clone(), -- inherent_data_providers: inherent_data_providers.clone(), -- telemetry_on_connect: Some(telemetry_connection_sinks.on_connect_stream()), -+ telemetry_on_connect: telemetry_connection_notifier.map(|x| x.on_connect_stream()), - voting_rule: grandpa::VotingRulesBuilder::default().build(), - prometheus_registry, - shared_voter_state, -@@ -330,17 +337,15 @@ pub fn new_full_base( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); -- } else { -- grandpa::setup_disabled_grandpa( -- client.clone(), -- &inherent_data_providers, -- network.clone(), -- )?; - } - - network_starter.start_network(); - Ok(NewFullBase { -- task_manager, inherent_data_providers, client, network, network_status_sinks, -+ task_manager, -+ inherent_data_providers, -+ client, -+ network, -+ network_status_sinks, - transaction_pool, - }) - } -@@ -353,14 +358,16 @@ pub fn new_full(config: Configuration) - }) - } - --pub fn new_light_base(config: Configuration) -> Result<( -- TaskManager, RpcHandlers, Arc, -+pub fn new_light_base(mut config: Configuration) -> Result<( -+ TaskManager, RpcHandlers, Option, Arc, - Arc::Hash>>, - Arc>> - ), ServiceError> { -- let (client, backend, keystore, mut task_manager, on_demand) = -+ let (client, backend, keystore_container, mut task_manager, on_demand) = - sc_service::new_light_parts::(&config)?; - -+ config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); -+ - let select_chain = sc_consensus::LongestChain::new(backend.clone()); - - let transaction_pool = Arc::new(sc_transaction_pool::BasicPool::new_light( -@@ -371,14 +378,12 @@ pub fn new_light_base(config: Configuration) -> Result<( - on_demand.clone(), - )); - -- let grandpa_block_import = grandpa::light_block_import( -- 
client.clone(), backend.clone(), &(client.clone() as Arc<_>), -- Arc::new(on_demand.checker().clone()), -+ let (grandpa_block_import, _) = grandpa::block_import( -+ client.clone(), -+ &(client.clone() as Arc<_>), -+ select_chain.clone(), - )?; -- -- let finality_proof_import = grandpa_block_import.clone(); -- let finality_proof_request_builder = -- finality_proof_import.create_finality_proof_request_builder(); -+ let justification_import = grandpa_block_import.clone(); - - let (babe_block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, -@@ -391,8 +396,7 @@ pub fn new_light_base(config: Configuration) -> Result<( - let import_queue = sc_consensus_babe::import_queue( - babe_link, - babe_block_import, -- None, -- Some(Box::new(finality_proof_import)), -+ Some(Box::new(justification_import)), - client.clone(), - select_chain.clone(), - inherent_data_providers.clone(), -@@ -401,9 +405,6 @@ pub fn new_light_base(config: Configuration) -> Result<( - sp_consensus::NeverCanAuthor, - )?; - -- let finality_proof_provider = -- GrandpaFinalityProofProvider::new_for_service(backend.clone(), client.clone()); -- - let (network, network_status_sinks, system_rpc_tx, network_starter) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, -@@ -413,8 +414,6 @@ pub fn new_light_base(config: Configuration) -> Result<( - import_queue, - on_demand: Some(on_demand.clone()), - block_announce_validator_builder: None, -- finality_proof_request_builder: Some(finality_proof_request_builder), -- finality_proof_provider: Some(finality_proof_provider), - })?; - network_starter.start_network(); - -@@ -433,32 +432,39 @@ pub fn new_light_base(config: Configuration) -> Result<( - - let rpc_extensions = node_rpc::create_light(light_deps); - -- let rpc_handlers = -+ let (rpc_handlers, telemetry_connection_notifier) = - sc_service::spawn_tasks(sc_service::SpawnTasksParams { - on_demand: Some(on_demand), - 
remote_blockchain: Some(backend.remote_blockchain()), - rpc_extensions_builder: Box::new(sc_service::NoopRpcExtensionBuilder(rpc_extensions)), - client: client.clone(), - transaction_pool: transaction_pool.clone(), -- config, keystore, backend, network_status_sinks, system_rpc_tx, -+ keystore: keystore_container.sync_keystore(), -+ config, backend, network_status_sinks, system_rpc_tx, - network: network.clone(), -- telemetry_connection_sinks: sc_service::TelemetryConnectionSinks::default(), - task_manager: &mut task_manager, - })?; - -- Ok((task_manager, rpc_handlers, client, network, transaction_pool)) -+ Ok(( -+ task_manager, -+ rpc_handlers, -+ telemetry_connection_notifier, -+ client, -+ network, -+ transaction_pool, -+ )) - } - - /// Builds a new service for a light client. - pub fn new_light(config: Configuration) -> Result { -- new_light_base(config).map(|(task_manager, _, _, _, _)| { -+ new_light_base(config).map(|(task_manager, _, _, _, _, _)| { - task_manager - }) - } - - #[cfg(test)] - mod tests { -- use std::{sync::Arc, borrow::Cow, any::Any}; -+ use std::{sync::Arc, borrow::Cow, any::Any, convert::TryInto}; - use sc_consensus_babe::{CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY}; - use sc_consensus_epochs::descendent_query; - use sp_consensus::{ -@@ -469,20 +475,25 @@ mod tests { - use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; - use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; - use codec::Encode; -- use sp_core::{crypto::Pair as CryptoPair, H256}; -+ use sp_core::{ -+ crypto::Pair as CryptoPair, -+ H256, -+ Public -+ }; -+ use sp_keystore::{SyncCryptoStorePtr, SyncCryptoStore}; - use sp_runtime::{ - generic::{BlockId, Era, Digest, SignedPayload}, - traits::{Block as BlockT, Header as HeaderT}, - traits::Verify, - }; - use sp_timestamp; -- use sp_finality_tracker; - use sp_keyring::AccountKeyring; - use sc_service_test::TestNetNode; - use crate::service::{new_full_base, new_light_base, NewFullBase}; 
-- use sp_runtime::traits::IdentifyAccount; -+ use sp_runtime::{key_types::BABE, traits::IdentifyAccount, RuntimeAppPublic}; - use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; - use sc_client_api::BlockBackend; -+ use sc_keystore::LocalKeystore; - - type AccountPublic = ::Signer; - -@@ -492,15 +503,15 @@ mod tests { - #[ignore] - fn test_sync() { - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); -- let keystore = sc_keystore::Store::open(keystore_path.path(), None) -- .expect("Creates keystore"); -- let alice = keystore.write().insert_ephemeral_from_seed::("//Alice") -- .expect("Creates authority pair"); -+ let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(keystore_path.path(), None) -+ .expect("Creates keystore")); -+ let alice: sp_consensus_babe::AuthorityId = SyncCryptoStore::sr25519_generate_new(&*keystore, BABE, Some("//Alice")) -+ .expect("Creates authority pair").into(); - - let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); - - // For the block factory -- let mut slot_num = 1u64; -+ let mut slot = 1u64; - - // For the extrinsics factory - let bob = Arc::new(AccountKeyring::Bob.pair()); -@@ -528,14 +539,13 @@ mod tests { - Ok((node, (inherent_data_providers, setup_handles.unwrap()))) - }, - |config| { -- let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; -+ let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) - }, - |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { - let mut inherent_data = inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); -- inherent_data.replace_data(sp_finality_tracker::INHERENT_IDENTIFIER, &1u64); - - let parent_id = BlockId::number(service.client().chain_info().best_number); - let parent_header = 
service.client().header(&parent_id).unwrap().unwrap(); -@@ -552,6 +562,7 @@ mod tests { - ); - - let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( -+ service.spawn_handle(), - service.client(), - service.transaction_pool(), - None, -@@ -561,7 +572,7 @@ mod tests { - descendent_query(&*service.client()), - &parent_hash, - parent_number, -- slot_num, -+ slot.into(), - ).unwrap().unwrap(); - - let mut digest = Digest::::default(); -@@ -569,18 +580,18 @@ mod tests { - // even though there's only one authority some slots might be empty, - // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { -- inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); -+ inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot * SLOT_DURATION)); - if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( -- slot_num, -+ slot.into(), - &parent_header, - &*service.client(), -- &keystore, -+ keystore.clone(), - &babe_link, - ) { - break babe_pre_digest; - } - -- slot_num += 1; -+ slot += 1; - }; - - digest.push(::babe_pre_digest(babe_pre_digest)); -@@ -600,11 +611,18 @@ mod tests { - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let to_sign = pre_hash.encode(); -- let signature = alice.sign(&to_sign[..]); -+ let signature = SyncCryptoStore::sign_with( -+ &*keystore, -+ sp_consensus_babe::AuthorityId::ID, -+ &alice.to_public_crypto_pair(), -+ &to_sign, -+ ).unwrap() -+ .try_into() -+ .unwrap(); - let item = ::babe_seal( -- signature.into(), -+ signature, - ); -- slot_num += 1; -+ slot += 1; - - let mut params = BlockImportParams::new(BlockOrigin::File, new_header); - params.post_digests.push(item); -@@ -679,7 +697,7 @@ mod tests { - Ok(sc_service_test::TestNetComponents::new(task_manager, client, network, transaction_pool)) - }, - |config| { -- let (keep_alive, _, client, network, transaction_pool) = new_light_base(config)?; -+ let (keep_alive, _, _, client, network, transaction_pool) = new_light_base(config)?; - Ok(sc_service_test::TestNetComponents::new(keep_alive, client, network, transaction_pool)) - }, - vec![ -``` diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index b62e8bac8ccc6..7137cf1d789a2 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } # Needed for various traits. In our case, `OnFinalize`. 
@@ -33,7 +32,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 227d45623d688..0d80ec5923d26 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -24,7 +24,7 @@ use super::*; use sp_runtime::traits::Bounded; use frame_system::RawOrigin as SystemOrigin; use frame_benchmarking::{ - benchmarks, account, whitelisted_caller, whitelist_account, impl_benchmark_test_suite + benchmarks_instance_pallet, account, whitelisted_caller, whitelist_account, impl_benchmark_test_suite }; use frame_support::traits::Get; use frame_support::{traits::EnsureOrigin, dispatch::UnfilteredDispatchable}; @@ -33,13 +33,13 @@ use crate::Pallet as Assets; const SEED: u32 = 0; -fn create_default_asset(is_sufficient: bool) +fn create_default_asset, I: 'static>(is_sufficient: bool) -> (T::AccountId, ::Source) { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); let root = SystemOrigin::Root.into(); - assert!(Assets::::force_create( + assert!(Assets::::force_create( root, Default::default(), caller_lookup.clone(), @@ -49,14 +49,14 @@ fn create_default_asset(is_sufficient: bool) (caller, caller_lookup) } -fn create_default_minted_asset(is_sufficient: bool, amount: T::Balance) +fn create_default_minted_asset, I: 'static>(is_sufficient: bool, amount: T::Balance) -> (T::AccountId, ::Source) { - let (caller, caller_lookup) = create_default_asset::(is_sufficient); + let (caller, caller_lookup) = create_default_asset::(is_sufficient); if !is_sufficient { T::Currency::make_free_balance_be(&caller, T::Currency::minimum_balance()); } - assert!(Assets::::mint( + assert!(Assets::::mint( SystemOrigin::Signed(caller.clone()).into(), Default::default(), caller_lookup.clone(), @@ -65,42 +65,42 @@ fn 
create_default_minted_asset(is_sufficient: bool, amount: T::Balanc (caller, caller_lookup) } -fn swap_is_sufficient(s: &mut bool) { - Asset::::mutate(&T::AssetId::default(), |maybe_a| +fn swap_is_sufficient, I: 'static>(s: &mut bool) { + Asset::::mutate(&T::AssetId::default(), |maybe_a| if let Some(ref mut a) = maybe_a { sp_std::mem::swap(s, &mut a.is_sufficient) } ); } -fn add_consumers(minter: T::AccountId, n: u32) { +fn add_consumers, I: 'static>(minter: T::AccountId, n: u32) { let origin = SystemOrigin::Signed(minter); let mut s = false; - swap_is_sufficient::(&mut s); + swap_is_sufficient::(&mut s); for i in 0..n { let target = account("consumer", i, SEED); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let target_lookup = T::Lookup::unlookup(target); - assert!(Assets::::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); + assert!(Assets::::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); } - swap_is_sufficient::(&mut s); + swap_is_sufficient::(&mut s); } -fn add_sufficients(minter: T::AccountId, n: u32) { +fn add_sufficients, I: 'static>(minter: T::AccountId, n: u32) { let origin = SystemOrigin::Signed(minter); let mut s = true; - swap_is_sufficient::(&mut s); + swap_is_sufficient::(&mut s); for i in 0..n { let target = account("sufficient", i, SEED); let target_lookup = T::Lookup::unlookup(target); - assert!(Assets::::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); + assert!(Assets::::mint(origin.clone().into(), Default::default(), target_lookup, 100u32.into()).is_ok()); } - swap_is_sufficient::(&mut s); + swap_is_sufficient::(&mut s); } -fn add_approvals(minter: T::AccountId, n: u32) { +fn add_approvals, I: 'static>(minter: T::AccountId, n: u32) { T::Currency::deposit_creating(&minter, T::ApprovalDeposit::get() * n.into()); let minter_lookup = T::Lookup::unlookup(minter.clone()); let origin = 
SystemOrigin::Signed(minter); - Assets::::mint( + Assets::::mint( origin.clone().into(), Default::default(), minter_lookup, @@ -110,7 +110,7 @@ fn add_approvals(minter: T::AccountId, n: u32) { let target = account("approval", i, SEED); T::Currency::make_free_balance_be(&target, T::Currency::minimum_balance()); let target_lookup = T::Lookup::unlookup(target); - Assets::::approve_transfer( + Assets::::approve_transfer( origin.clone().into(), Default::default(), target_lookup, @@ -119,7 +119,7 @@ fn add_approvals(minter: T::AccountId, n: u32) { } } -fn assert_last_event(generic_event: ::Event) { +fn assert_last_event, I: 'static>(generic_event: >::Event) { let events = frame_system::Pallet::::events(); let system_event: ::Event = generic_event.into(); // compare to the last event record @@ -127,7 +127,7 @@ fn assert_last_event(generic_event: ::Event) { assert_eq!(event, &system_event); } -fn assert_event(generic_event: ::Event) { +fn assert_event, I: 'static>(generic_event: >::Event) { let system_event: ::Event = generic_event.into(); let events = frame_system::Pallet::::events(); assert!(events.iter().any(|event_record| { @@ -135,14 +135,14 @@ fn assert_event(generic_event: ::Event) { })); } -benchmarks! { +benchmarks_instance_pallet! { create { let caller: T::AccountId = whitelisted_caller(); let caller_lookup = T::Lookup::unlookup(caller.clone()); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, 1u32.into()) verify { - assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); + assert_last_event::(Event::Created(Default::default(), caller.clone(), caller).into()); } force_create { @@ -150,127 +150,127 @@ benchmarks! 
{ let caller_lookup = T::Lookup::unlookup(caller.clone()); }: _(SystemOrigin::Root, Default::default(), caller_lookup, true, 1u32.into()) verify { - assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); + assert_last_event::(Event::ForceCreated(Default::default(), caller).into()); } destroy { let c in 0 .. 5_000; let s in 0 .. 5_000; let a in 0 .. 5_00; - let (caller, _) = create_default_asset::(true); - add_consumers::(caller.clone(), c); - add_sufficients::(caller.clone(), s); - add_approvals::(caller.clone(), a); - let witness = Asset::::get(T::AssetId::default()).unwrap().destroy_witness(); + let (caller, _) = create_default_asset::(true); + add_consumers::(caller.clone(), c); + add_sufficients::(caller.clone(), s); + add_approvals::(caller.clone(), a); + let witness = Asset::::get(T::AssetId::default()).unwrap().destroy_witness(); }: _(SystemOrigin::Signed(caller), Default::default(), witness) verify { - assert_last_event::(Event::Destroyed(Default::default()).into()); + assert_last_event::(Event::Destroyed(Default::default()).into()); } mint { - let (caller, caller_lookup) = create_default_asset::(true); + let (caller, caller_lookup) = create_default_asset::(true); let amount = T::Balance::from(100u32); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) verify { - assert_last_event::(Event::Issued(Default::default(), caller, amount).into()); + assert_last_event::(Event::Issued(Default::default(), caller, amount).into()); } burn { let amount = T::Balance::from(100u32); - let (caller, caller_lookup) = create_default_minted_asset::(true, amount); + let (caller, caller_lookup) = create_default_minted_asset::(true, amount); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, amount) verify { - assert_last_event::(Event::Burned(Default::default(), caller, amount).into()); + assert_last_event::(Event::Burned(Default::default(), caller, amount).into()); } transfer { let amount = 
T::Balance::from(100u32); - let (caller, caller_lookup) = create_default_minted_asset::(true, amount); + let (caller, caller_lookup) = create_default_minted_asset::(true, amount); let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) verify { - assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); } transfer_keep_alive { let mint_amount = T::Balance::from(200u32); let amount = T::Balance::from(100u32); - let (caller, caller_lookup) = create_default_minted_asset::(true, mint_amount); + let (caller, caller_lookup) = create_default_minted_asset::(true, mint_amount); let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), target_lookup, amount) verify { assert!(frame_system::Pallet::::account_exists(&caller)); - assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); + assert_last_event::(Event::Transferred(Default::default(), caller, target, amount).into()); } force_transfer { let amount = T::Balance::from(100u32); - let (caller, caller_lookup) = create_default_minted_asset::(true, amount); + let (caller, caller_lookup) = create_default_minted_asset::(true, amount); let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup, target_lookup, amount) verify { - assert_last_event::( + assert_last_event::( Event::Transferred(Default::default(), caller, target, amount).into() ); } freeze { - let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + let (caller, caller_lookup) = 
create_default_minted_asset::(true, 100u32.into()); }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) verify { - assert_last_event::(Event::Frozen(Default::default(), caller).into()); + assert_last_event::(Event::Frozen(Default::default(), caller).into()); } thaw { - let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); - Assets::::freeze( + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + Assets::::freeze( SystemOrigin::Signed(caller.clone()).into(), Default::default(), caller_lookup.clone(), )?; }: _(SystemOrigin::Signed(caller.clone()), Default::default(), caller_lookup) verify { - assert_last_event::(Event::Thawed(Default::default(), caller).into()); + assert_last_event::(Event::Thawed(Default::default(), caller).into()); } freeze_asset { - let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); }: _(SystemOrigin::Signed(caller.clone()), Default::default()) verify { - assert_last_event::(Event::AssetFrozen(Default::default()).into()); + assert_last_event::(Event::AssetFrozen(Default::default()).into()); } thaw_asset { - let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); - Assets::::freeze_asset( + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + Assets::::freeze_asset( SystemOrigin::Signed(caller.clone()).into(), Default::default(), )?; }: _(SystemOrigin::Signed(caller.clone()), Default::default()) verify { - assert_last_event::(Event::AssetThawed(Default::default()).into()); + assert_last_event::(Event::AssetThawed(Default::default()).into()); } transfer_ownership { - let (caller, _) = create_default_asset::(true); + let (caller, _) = create_default_asset::(true); let target: T::AccountId = account("target", 0, SEED); let target_lookup = T::Lookup::unlookup(target.clone()); }: 
_(SystemOrigin::Signed(caller), Default::default(), target_lookup) verify { - assert_last_event::(Event::OwnerChanged(Default::default(), target).into()); + assert_last_event::(Event::OwnerChanged(Default::default(), target).into()); } set_team { - let (caller, _) = create_default_asset::(true); + let (caller, _) = create_default_asset::(true); let target0 = T::Lookup::unlookup(account("target", 0, SEED)); let target1 = T::Lookup::unlookup(account("target", 1, SEED)); let target2 = T::Lookup::unlookup(account("target", 2, SEED)); }: _(SystemOrigin::Signed(caller), Default::default(), target0.clone(), target1.clone(), target2.clone()) verify { - assert_last_event::(Event::TeamChanged( + assert_last_event::(Event::TeamChanged( Default::default(), account("target", 0, SEED), account("target", 1, SEED), @@ -286,23 +286,23 @@ benchmarks! { let symbol = vec![0u8; s as usize]; let decimals = 12; - let (caller, _) = create_default_asset::(true); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let (caller, _) = create_default_asset::(true); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); }: _(SystemOrigin::Signed(caller), Default::default(), name.clone(), symbol.clone(), decimals) verify { let id = Default::default(); - assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); + assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); } clear_metadata { - let (caller, _) = create_default_asset::(true); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let (caller, _) = create_default_asset::(true); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let dummy = vec![0u8; T::StringLimit::get() as usize]; let origin = SystemOrigin::Signed(caller.clone()).into(); - Assets::::set_metadata(origin, Default::default(), dummy.clone(), dummy, 12)?; + Assets::::set_metadata(origin, Default::default(), 
dummy.clone(), dummy, 12)?; }: _(SystemOrigin::Signed(caller), Default::default()) verify { - assert_last_event::(Event::MetadataCleared(Default::default()).into()); + assert_last_event::(Event::MetadataCleared(Default::default()).into()); } force_set_metadata { @@ -313,10 +313,10 @@ benchmarks! { let symbol = vec![0u8; s as usize]; let decimals = 12; - create_default_asset::(true); + create_default_asset::(true); let origin = T::ForceOrigin::successful_origin(); - let call = Call::::force_set_metadata( + let call = Call::::force_set_metadata( Default::default(), name.clone(), symbol.clone(), @@ -326,28 +326,28 @@ benchmarks! { }: { call.dispatch_bypass_filter(origin)? } verify { let id = Default::default(); - assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); + assert_last_event::(Event::MetadataSet(id, name, symbol, decimals, false).into()); } force_clear_metadata { - let (caller, _) = create_default_asset::(true); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let (caller, _) = create_default_asset::(true); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let dummy = vec![0u8; T::StringLimit::get() as usize]; let origin = SystemOrigin::Signed(caller.clone()).into(); - Assets::::set_metadata(origin, Default::default(), dummy.clone(), dummy, 12)?; + Assets::::set_metadata(origin, Default::default(), dummy.clone(), dummy, 12)?; let origin = T::ForceOrigin::successful_origin(); - let call = Call::::force_clear_metadata(Default::default()); + let call = Call::::force_clear_metadata(Default::default()); }: { call.dispatch_bypass_filter(origin)? 
} verify { - assert_last_event::(Event::MetadataCleared(Default::default()).into()); + assert_last_event::(Event::MetadataCleared(Default::default()).into()); } force_asset_status { - let (caller, caller_lookup) = create_default_asset::(true); + let (caller, caller_lookup) = create_default_asset::(true); let origin = T::ForceOrigin::successful_origin(); - let call = Call::::force_asset_status( + let call = Call::::force_asset_status( Default::default(), caller_lookup.clone(), caller_lookup.clone(), @@ -359,12 +359,12 @@ benchmarks! { ); }: { call.dispatch_bypass_filter(origin)? } verify { - assert_last_event::(Event::AssetStatusChanged(Default::default()).into()); + assert_last_event::(Event::AssetStatusChanged(Default::default()).into()); } approve_transfer { - let (caller, _) = create_default_minted_asset::(true, 100u32.into()); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let (caller, _) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let id = Default::default(); let delegate: T::AccountId = account("delegate", 0, SEED); @@ -372,12 +372,12 @@ benchmarks! { let amount = 100u32.into(); }: _(SystemOrigin::Signed(caller.clone()), id, delegate_lookup, amount) verify { - assert_last_event::(Event::ApprovedTransfer(id, caller, delegate, amount).into()); + assert_last_event::(Event::ApprovedTransfer(id, caller, delegate, amount).into()); } transfer_approved { - let (owner, owner_lookup) = create_default_minted_asset::(true, 100u32.into()); - T::Currency::make_free_balance_be(&owner, DepositBalanceOf::::max_value()); + let (owner, owner_lookup) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&owner, DepositBalanceOf::::max_value()); let id = Default::default(); let delegate: T::AccountId = account("delegate", 0, SEED); @@ -385,44 +385,44 @@ benchmarks! 
{ let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let amount = 100u32.into(); let origin = SystemOrigin::Signed(owner.clone()).into(); - Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; + Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; let dest: T::AccountId = account("dest", 0, SEED); let dest_lookup = T::Lookup::unlookup(dest.clone()); }: _(SystemOrigin::Signed(delegate.clone()), id, owner_lookup, dest_lookup, amount) verify { assert!(T::Currency::reserved_balance(&owner).is_zero()); - assert_event::(Event::Transferred(id, owner, dest, amount).into()); + assert_event::(Event::Transferred(id, owner, dest, amount).into()); } cancel_approval { - let (caller, _) = create_default_minted_asset::(true, 100u32.into()); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let (caller, _) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let id = Default::default(); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let amount = 100u32.into(); let origin = SystemOrigin::Signed(caller.clone()).into(); - Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; + Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; }: _(SystemOrigin::Signed(caller.clone()), id, delegate_lookup) verify { - assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); + assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); } force_cancel_approval { - let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); - T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); + let (caller, caller_lookup) = create_default_minted_asset::(true, 100u32.into()); + T::Currency::make_free_balance_be(&caller, DepositBalanceOf::::max_value()); let id = 
Default::default(); let delegate: T::AccountId = account("delegate", 0, SEED); let delegate_lookup = T::Lookup::unlookup(delegate.clone()); let amount = 100u32.into(); let origin = SystemOrigin::Signed(caller.clone()).into(); - Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; + Assets::::approve_transfer(origin, id, delegate_lookup.clone(), amount)?; }: _(SystemOrigin::Signed(caller.clone()), id, caller_lookup, delegate_lookup) verify { - assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); + assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); } } diff --git a/frame/assets/src/extra_mutator.rs b/frame/assets/src/extra_mutator.rs new file mode 100644 index 0000000000000..d86d78ce3e376 --- /dev/null +++ b/frame/assets/src/extra_mutator.rs @@ -0,0 +1,108 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Datatype for easy mutation of the extra "sidecar" data. + +use super::*; + +/// A mutator type allowing inspection and possible modification of the extra "sidecar" data. +/// +/// This may be used as a `Deref` for the pallet's extra data. If mutated (using `DerefMut`), then +/// any uncommitted changes (see `commit` function) will be automatically committed to storage when +/// dropped. 
Changes, even after committed, may be reverted to their original values with the +/// `revert` function. +pub struct ExtraMutator, I: 'static = ()> { + id: T::AssetId, + who: T::AccountId, + original: T::Extra, + pending: Option, +} + +impl, I: 'static> Drop for ExtraMutator { + fn drop(&mut self) { + debug_assert!( + self.commit().is_ok(), + "attempt to write to non-existent asset account" + ); + } +} + +impl, I: 'static> sp_std::ops::Deref for ExtraMutator { + type Target = T::Extra; + fn deref(&self) -> &T::Extra { + match self.pending { + Some(ref value) => value, + None => &self.original, + } + } +} + +impl, I: 'static> sp_std::ops::DerefMut for ExtraMutator { + fn deref_mut(&mut self) -> &mut T::Extra { + if self.pending.is_none() { + self.pending = Some(self.original.clone()); + } + self.pending.as_mut().unwrap() + } +} + +impl, I: 'static> ExtraMutator { + pub(super) fn maybe_new( + id: T::AssetId, + who: impl sp_std::borrow::Borrow, + ) -> Option> { + if Account::::contains_key(id, who.borrow()) { + Some(ExtraMutator:: { + id, + who: who.borrow().clone(), + original: Account::::get(id, who.borrow()).extra, + pending: None, + }) + } else { + None + } + } + + /// Commit any changes to storage. + pub fn commit(&mut self) -> Result<(), ()> { + if let Some(extra) = self.pending.take() { + Account::::try_mutate_exists(self.id, self.who.borrow(), |maybe_account| { + if let Some(ref mut account) = maybe_account { + account.extra = extra; + Ok(()) + } else { + Err(()) + } + }) + } else { + Ok(()) + } + } + + /// Revert any changes, even those already committed by `self` and drop self. 
+ pub fn revert(mut self) -> Result<(), ()> { + self.pending = None; + Account::::try_mutate_exists(self.id, self.who.borrow(), |maybe_account| { + if let Some(ref mut account) = maybe_account { + account.extra = self.original.clone(); + Ok(()) + } else { + Err(()) + } + }) + } +} diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs new file mode 100644 index 0000000000000..13c92f781b071 --- /dev/null +++ b/frame/assets/src/functions.rs @@ -0,0 +1,479 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Functions for the Assets pallet. + +use super::*; + +// The main implementation block for the module. +impl, I: 'static> Pallet { + // Public immutables + + /// Return the extra "sid-car" data for `id`/`who`, or `None` if the account doesn't exist. + pub fn adjust_extra( + id: T::AssetId, + who: impl sp_std::borrow::Borrow, + ) -> Option> { + ExtraMutator::maybe_new(id, who) + } + + /// Get the asset `id` balance of `who`. + pub fn balance(id: T::AssetId, who: impl sp_std::borrow::Borrow) -> T::Balance { + Account::::get(id, who.borrow()).balance + } + + /// Get the total supply of an asset `id`. 
+ pub fn total_supply(id: T::AssetId) -> T::Balance { + Asset::::get(id) + .map(|x| x.supply) + .unwrap_or_else(Zero::zero) + } + + pub(super) fn new_account( + who: &T::AccountId, + d: &mut AssetDetails>, + ) -> Result { + let accounts = d.accounts.checked_add(1).ok_or(Error::::Overflow)?; + let is_sufficient = if d.is_sufficient { + frame_system::Pallet::::inc_sufficients(who); + d.sufficients += 1; + true + } else { + frame_system::Pallet::::inc_consumers(who).map_err(|_| Error::::NoProvider)?; + false + }; + d.accounts = accounts; + Ok(is_sufficient) + } + + pub(super) fn dead_account( + what: T::AssetId, + who: &T::AccountId, + d: &mut AssetDetails>, + sufficient: bool, + ) { + if sufficient { + d.sufficients = d.sufficients.saturating_sub(1); + frame_system::Pallet::::dec_sufficients(who); + } else { + frame_system::Pallet::::dec_consumers(who); + } + d.accounts = d.accounts.saturating_sub(1); + T::Freezer::died(what, who) + } + + pub(super) fn can_increase( + id: T::AssetId, + who: &T::AccountId, + amount: T::Balance, + ) -> DepositConsequence { + let details = match Asset::::get(id) { + Some(details) => details, + None => return DepositConsequence::UnknownAsset, + }; + if details.supply.checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + let account = Account::::get(id, who); + if account.balance.checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + if account.balance.is_zero() { + if amount < details.min_balance { + return DepositConsequence::BelowMinimum + } + if !details.is_sufficient && frame_system::Pallet::::providers(who) == 0 { + return DepositConsequence::CannotCreate + } + if details.is_sufficient && details.sufficients.checked_add(1).is_none() { + return DepositConsequence::Overflow + } + } + + DepositConsequence::Success + } + + /// Return the consequence of a withdraw. 
+ pub(super) fn can_decrease( + id: T::AssetId, + who: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> WithdrawConsequence { + use WithdrawConsequence::*; + let details = match Asset::::get(id) { + Some(details) => details, + None => return UnknownAsset, + }; + if details.supply.checked_sub(&amount).is_none() { + return Underflow + } + if details.is_frozen { + return Frozen + } + let account = Account::::get(id, who); + if account.is_frozen { + return Frozen + } + if let Some(rest) = account.balance.checked_sub(&amount) { + if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + match frozen.checked_add(&details.min_balance) { + Some(required) if rest < required => return Frozen, + None => return Overflow, + _ => {} + } + } + + let is_provider = false; + let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); + let must_keep_alive = keep_alive || is_required; + + if rest < details.min_balance { + if must_keep_alive { + WouldDie + } else { + ReducedToZero(rest) + } + } else { + Success + } + } else { + NoFunds + } + } + + // Maximum `amount` that can be passed into `can_withdraw` to result in a `WithdrawConsequence` + // of `Success`. + pub(super) fn reducible_balance( + id: T::AssetId, + who: &T::AccountId, + keep_alive: bool, + ) -> Result> { + let details = Asset::::get(id).ok_or_else(|| Error::::Unknown)?; + ensure!(!details.is_frozen, Error::::Frozen); + + let account = Account::::get(id, who); + ensure!(!account.is_frozen, Error::::Frozen); + + let amount = if let Some(frozen) = T::Freezer::frozen_balance(id, who) { + // Frozen balance: account CANNOT be deleted + let required = frozen + .checked_add(&details.min_balance) + .ok_or(Error::::Overflow)?; + account.balance.saturating_sub(required) + } else { + let is_provider = false; + let is_required = is_provider && !frame_system::Pallet::::can_dec_provider(who); + if keep_alive || is_required { + // We want to keep the account around. 
+ account.balance.saturating_sub(details.min_balance) + } else { + // Don't care if the account dies + account.balance + } + }; + Ok(amount.min(details.supply)) + } + + /// Make preparatory checks for debiting some funds from an account. Flags indicate requirements + /// of the debit. + /// + /// - `amount`: The amount desired to be debited. The actual amount returned for debit may be + /// less (in the case of `best_effort` being `true`) or greater by up to the minimum balance + /// less one. + /// - `keep_alive`: Require that `target` must stay alive. + /// - `respect_freezer`: Respect any freezes on the account or token (or not). + /// - `best_effort`: The debit amount may be less than `amount`. + /// + /// On success, the amount which should be debited (this will always be at least `amount` unless + /// `best_effort` is `true`) together with an optional value indicating the argument which must + /// be passed into the `melted` function of the `T::Freezer` if `Some`. + /// + /// If no valid debit can be made then return an `Err`. + pub(super) fn prep_debit( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + f: DebitFlags, + ) -> Result { + let actual = Self::reducible_balance(id, target, f.keep_alive)?.min(amount); + ensure!(f.best_effort || actual >= amount, Error::::BalanceLow); + + let conseq = Self::can_decrease(id, target, actual, f.keep_alive); + let actual = match conseq.into_result() { + Ok(dust) => actual.saturating_add(dust), //< guaranteed by reducible_balance + Err(e) => { + debug_assert!(false, "passed from reducible_balance; qed"); + return Err(e.into()) + } + }; + + Ok(actual) + } + + /// Make preparatory checks for crediting some funds from an account. Flags indicate + /// requirements of the credit. + /// + /// - `amount`: The amount desired to be credited. + /// - `debit`: The amount by which some other account has been debited. If this is greater than + /// `amount`, then the `burn_dust` parameter takes effect. 
+ /// - `burn_dust`: Indicates that in the case of debit being greater than amount, the additional + /// (dust) value should be burned, rather than credited. + /// + /// On success, the amount which should be credited (this will always be at least `amount`) + /// together with an optional value indicating the value which should be burned. The latter + /// will always be `None` as long as `burn_dust` is `false` or `debit` is no greater than + /// `amount`. + /// + /// If no valid credit can be made then return an `Err`. + pub(super) fn prep_credit( + id: T::AssetId, + dest: &T::AccountId, + amount: T::Balance, + debit: T::Balance, + burn_dust: bool, + ) -> Result<(T::Balance, Option), DispatchError> { + let (credit, maybe_burn) = match (burn_dust, debit.checked_sub(&amount)) { + (true, Some(dust)) => (amount, Some(dust)), + _ => (debit, None), + }; + Self::can_increase(id, &dest, credit).into_result()?; + Ok((credit, maybe_burn)) + } + + /// Increases the asset `id` balance of `beneficiary` by `amount`. + /// + /// This alters the registered supply of the asset and emits an event. + /// + /// Will return an error or will increase the amount by exactly `amount`. + pub(super) fn do_mint( + id: T::AssetId, + beneficiary: &T::AccountId, + amount: T::Balance, + maybe_check_issuer: Option, + ) -> DispatchResult { + Self::increase_balance(id, beneficiary, amount, |details| -> DispatchResult { + if let Some(check_issuer) = maybe_check_issuer { + ensure!( + &check_issuer == &details.issuer, + Error::::NoPermission + ); + } + debug_assert!(T::Balance::max_value() - details.supply >= amount, "checked in prep; qed"); + details.supply = details.supply.saturating_add(amount); + Ok(()) + })?; + Self::deposit_event(Event::Issued(id, beneficiary.clone(), amount)); + Ok(()) + } + + /// Increases the asset `id` balance of `beneficiary` by `amount`. + /// + /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_mint` if you need + /// that. 
This is not intended to be used alone. + /// + /// Will return an error or will increase the amount by exactly `amount`. + pub(super) fn increase_balance( + id: T::AssetId, + beneficiary: &T::AccountId, + amount: T::Balance, + check: impl FnOnce( + &mut AssetDetails>, + ) -> DispatchResult, + ) -> DispatchResult { + if amount.is_zero() { return Ok(()) } + + Self::can_increase(id, beneficiary, amount).into_result()?; + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + check(details)?; + + Account::::try_mutate(id, beneficiary, |t| -> DispatchResult { + let new_balance = t.balance.saturating_add(amount); + ensure!(new_balance >= details.min_balance, TokenError::BelowMinimum); + if t.balance.is_zero() { + t.sufficient = Self::new_account(beneficiary, details)?; + } + t.balance = new_balance; + Ok(()) + })?; + Ok(()) + })?; + Ok(()) + } + + /// Reduces asset `id` balance of `target` by `amount`. Flags `f` can be given to alter whether + /// it attempts a `best_effort` or makes sure to `keep_alive` the account. + /// + /// This alters the registered supply of the asset and emits an event. + /// + /// Will return an error and do nothing or will decrease the amount and return the amount + /// reduced by. + pub(super) fn do_burn( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + maybe_check_admin: Option, + f: DebitFlags, + ) -> Result { + let actual = Self::decrease_balance(id, target, amount, f, |actual, details| { + // Check admin rights. + if let Some(check_admin) = maybe_check_admin { + ensure!(&check_admin == &details.admin, Error::::NoPermission); + } + + debug_assert!(details.supply >= actual, "checked in prep; qed"); + details.supply = details.supply.saturating_sub(actual); + + Ok(()) + })?; + Self::deposit_event(Event::Burned(id, target.clone(), actual)); + Ok(actual) + } + + /// Reduces asset `id` balance of `target` by `amount`. 
Flags `f` can be given to alter whether + /// it attempts a `best_effort` or makes sure to `keep_alive` the account. + /// + /// LOW-LEVEL: Does not alter the supply of asset or emit an event. Use `do_burn` if you need + /// that. This is not intended to be used alone. + /// + /// Will return an error and do nothing or will decrease the amount and return the amount + /// reduced by. + pub(super) fn decrease_balance( + id: T::AssetId, + target: &T::AccountId, + amount: T::Balance, + f: DebitFlags, + check: impl FnOnce( + T::Balance, + &mut AssetDetails>, + ) -> DispatchResult, + ) -> Result { + if amount.is_zero() { return Ok(amount) } + + let actual = Self::prep_debit(id, target, amount, f)?; + + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + check(actual, details)?; + + Account::::try_mutate_exists(id, target, |maybe_account| -> DispatchResult { + let mut account = maybe_account.take().unwrap_or_default(); + debug_assert!(account.balance >= actual, "checked in prep; qed"); + + // Make the debit. + account.balance = account.balance.saturating_sub(actual); + *maybe_account = if account.balance < details.min_balance { + debug_assert!(account.balance.is_zero(), "checked in prep; qed"); + Self::dead_account(id, target, details, account.sufficient); + None + } else { + Some(account) + }; + Ok(()) + })?; + + Ok(()) + })?; + + Ok(actual) + } + + /// Reduces the asset `id` balance of `source` by some `amount` and increases the balance of + /// `dest` by (similar) amount. + /// + /// Returns the actual amount placed into `dest`. Exact semantics are determined by the flags + /// `f`. + /// + /// Will fail if the amount transferred is so small that it cannot create the destination due + /// to minimum balance requirements. 
+ pub(super) fn do_transfer( + id: T::AssetId, + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + maybe_need_admin: Option, + f: TransferFlags, + ) -> Result { + // Early exist if no-op. + if amount.is_zero() { + Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), amount)); + return Ok(amount) + } + + // Figure out the debit and credit, together with side-effects. + let debit = Self::prep_debit(id, &source, amount, f.into())?; + let (credit, maybe_burn) = Self::prep_credit(id, &dest, amount, debit, f.burn_dust)?; + + let mut source_account = Account::::get(id, &source); + + Asset::::try_mutate(id, |maybe_details| -> DispatchResult { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + + // Check admin rights. + if let Some(need_admin) = maybe_need_admin { + ensure!(&need_admin == &details.admin, Error::::NoPermission); + } + + // Skip if source == dest + if source == dest { + return Ok(()) + } + + // Burn any dust if needed. + if let Some(burn) = maybe_burn { + // Debit dust from supply; this will not saturate since it's already checked in prep. + debug_assert!(details.supply >= burn, "checked in prep; qed"); + details.supply = details.supply.saturating_sub(burn); + } + + // Debit balance from source; this will not saturate since it's already checked in prep. + debug_assert!(source_account.balance >= debit, "checked in prep; qed"); + source_account.balance = source_account.balance.saturating_sub(debit); + + Account::::try_mutate(id, &dest, |a| -> DispatchResult { + // Calculate new balance; this will not saturate since it's already checked in prep. + debug_assert!(a.balance.checked_add(&credit).is_some(), "checked in prep; qed"); + let new_balance = a.balance.saturating_add(credit); + + // Create a new account if there wasn't one already. 
+ if a.balance.is_zero() { + a.sufficient = Self::new_account(&dest, details)?; + } + + a.balance = new_balance; + Ok(()) + })?; + + // Remove source account if it's now dead. + if source_account.balance < details.min_balance { + debug_assert!(source_account.balance.is_zero(), "checked in prep; qed"); + Self::dead_account(id, &source, details, source_account.sufficient); + Account::::remove(id, &source); + } else { + Account::::insert(id, &source, &source_account) + } + + Ok(()) + })?; + + Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), credit)); + Ok(credit) + } +} diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs new file mode 100644 index 0000000000000..d0ab13072a88d --- /dev/null +++ b/frame/assets/src/impl_fungibles.rs @@ -0,0 +1,156 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementations for fungibles trait. 
+ +use super::*; + +impl, I: 'static> fungibles::Inspect<::AccountId> for Pallet { + type AssetId = T::AssetId; + type Balance = T::Balance; + + fn total_issuance(asset: Self::AssetId) -> Self::Balance { + Asset::::get(asset) + .map(|x| x.supply) + .unwrap_or_else(Zero::zero) + } + + fn minimum_balance(asset: Self::AssetId) -> Self::Balance { + Asset::::get(asset) + .map(|x| x.min_balance) + .unwrap_or_else(Zero::zero) + } + + fn balance(asset: Self::AssetId, who: &::AccountId) -> Self::Balance { + Pallet::::balance(asset, who) + } + + fn reducible_balance( + asset: Self::AssetId, + who: &::AccountId, + keep_alive: bool, + ) -> Self::Balance { + Pallet::::reducible_balance(asset, who, keep_alive).unwrap_or(Zero::zero()) + } + + fn can_deposit( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> DepositConsequence { + Pallet::::can_increase(asset, who, amount) + } + + fn can_withdraw( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> WithdrawConsequence { + Pallet::::can_decrease(asset, who, amount, false) + } +} + +impl, I: 'static> fungibles::Mutate<::AccountId> for Pallet { + fn mint_into( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> DispatchResult { + Self::do_mint(asset, who, amount, None) + } + + fn burn_from( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { + keep_alive: false, + best_effort: false, + }; + Self::do_burn(asset, who, amount, None, f) + } + + fn slash( + asset: Self::AssetId, + who: &::AccountId, + amount: Self::Balance, + ) -> Result { + let f = DebitFlags { + keep_alive: false, + best_effort: true, + }; + Self::do_burn(asset, who, amount, None, f) + } +} + +impl, I: 'static> fungibles::Transfer for Pallet { + fn transfer( + asset: Self::AssetId, + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> Result { + let f = TransferFlags { + keep_alive, + 
best_effort: false, + burn_dust: false + }; + Self::do_transfer(asset, source, dest, amount, None, f) + } +} + +impl, I: 'static> fungibles::Unbalanced for Pallet { + fn set_balance(_: Self::AssetId, _: &T::AccountId, _: Self::Balance) -> DispatchResult { + unreachable!("set_balance is not used if other functions are impl'd"); + } + fn set_total_issuance(id: T::AssetId, amount: Self::Balance) { + Asset::::mutate_exists(id, |maybe_asset| { + if let Some(ref mut asset) = maybe_asset { + asset.supply = amount + } + }); + } + fn decrease_balance(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Result + { + let f = DebitFlags { keep_alive: false, best_effort: false }; + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) + } + fn decrease_balance_at_most(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Self::Balance + { + let f = DebitFlags { keep_alive: false, best_effort: true }; + Self::decrease_balance(asset, who, amount, f, |_, _| Ok(())) + .unwrap_or(Zero::zero()) + } + fn increase_balance(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Result + { + Self::increase_balance(asset, who, amount, |_| Ok(()))?; + Ok(amount) + } + fn increase_balance_at_most(asset: T::AssetId, who: &T::AccountId, amount: Self::Balance) + -> Self::Balance + { + match Self::increase_balance(asset, who, amount, |_| Ok(())) { + Ok(()) => amount, + Err(_) => Zero::zero(), + } + } +} diff --git a/frame/assets/src/impl_stored_map.rs b/frame/assets/src/impl_stored_map.rs new file mode 100644 index 0000000000000..6e91e5c1322f7 --- /dev/null +++ b/frame/assets/src/impl_stored_map.rs @@ -0,0 +1,58 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Assets pallet's `StoredMap` implementation. + +use super::*; + +impl, I: 'static> StoredMap<(T::AssetId, T::AccountId), T::Extra> for Pallet { + fn get(id_who: &(T::AssetId, T::AccountId)) -> T::Extra { + let &(id, ref who) = id_who; + if Account::::contains_key(id, who) { + Account::::get(id, who).extra + } else { + Default::default() + } + } + + fn try_mutate_exists>( + id_who: &(T::AssetId, T::AccountId), + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + let &(id, ref who) = id_who; + let mut maybe_extra = Some(Account::::get(id, who).extra); + let r = f(&mut maybe_extra)?; + // They want to write some value or delete it. + // If the account existed and they want to write a value, then we write. + // If the account didn't exist and they want to delete it, then we let it pass. + // Otherwise, we fail. + Account::::try_mutate_exists(id, who, |maybe_account| { + if let Some(extra) = maybe_extra { + // They want to write a value. Let this happen only if the account actually exists. + if let Some(ref mut account) = maybe_account { + account.extra = extra; + } else { + Err(StoredMapError::NoProviders)?; + } + } else { + // They want to delete it. Let this pass if the item never existed anyway. 
+ ensure!(maybe_account.is_none(), StoredMapError::ConsumerRemaining); + } + Ok(r) + }) + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 65630cf1ba565..e8dfd50f4086a 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -130,160 +130,30 @@ pub mod mock; #[cfg(test)] mod tests; -use sp_std::prelude::*; +mod extra_mutator; +pub use extra_mutator::*; +mod impl_stored_map; +mod impl_fungibles; +mod functions; +mod types; +pub use types::*; + +use sp_std::{prelude::*, borrow::Borrow}; use sp_runtime::{ - RuntimeDebug, - traits::{ - AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd, + RuntimeDebug, TokenError, traits::{ + AtLeast32BitUnsigned, Zero, StaticLookup, Saturating, CheckedSub, CheckedAdd, Bounded, + StoredMapError, } }; use codec::{Encode, Decode, HasCompact}; use frame_support::{ensure, dispatch::{DispatchError, DispatchResult}}; -use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved, Fungibles}; +use frame_support::traits::{Currency, ReservableCurrency, BalanceStatus::Reserved, StoredMap}; +use frame_support::traits::tokens::{WithdrawConsequence, DepositConsequence, fungibles}; use frame_system::Config as SystemConfig; + pub use weights::WeightInfo; pub use pallet::*; -impl Fungibles<::AccountId> for Pallet { - type AssetId = T::AssetId; - type Balance = T::Balance; - - fn balance( - asset: Self::AssetId, - who: &::AccountId, - ) -> Self::Balance { - Pallet::::balance(asset, who) - } - - fn can_deposit( - asset: Self::AssetId, - who: &::AccountId, - amount: Self::Balance, - ) -> bool { - Pallet::::can_deposit(asset, who, amount) - } - - fn deposit( - asset: Self::AssetId, - who: ::AccountId, - amount: Self::Balance, - ) -> DispatchResult { - Pallet::::increase_balance(asset, who, amount, None) - } - - fn withdraw( - asset: Self::AssetId, - who: ::AccountId, - amount: Self::Balance, - ) -> DispatchResult { - Pallet::::reduce_balance(asset, who, amount, None) - } 
-} - -type DepositBalanceOf = <::Currency as Currency<::AccountId>>::Balance; - -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct AssetDetails< - Balance, - AccountId, - DepositBalance, -> { - /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. - owner: AccountId, - /// Can mint tokens. - issuer: AccountId, - /// Can thaw tokens, force transfers and burn tokens from any account. - admin: AccountId, - /// Can freeze tokens. - freezer: AccountId, - /// The total supply across all accounts. - supply: Balance, - /// The balance deposited for this asset. This pays for the data stored here. - deposit: DepositBalance, - /// The ED for virtual accounts. - min_balance: Balance, - /// If `true`, then any account with this asset is given a provider reference. Otherwise, it - /// requires a consumer reference. - is_sufficient: bool, - /// The total number of accounts. - accounts: u32, - /// The total number of accounts for which we have placed a self-sufficient reference. - sufficients: u32, - /// The total number of approvals. - approvals: u32, - /// Whether the asset is frozen for non-admin transfers. - is_frozen: bool, -} - -impl AssetDetails { - pub fn destroy_witness(&self) -> DestroyWitness { - DestroyWitness { - accounts: self.accounts, - sufficients: self.sufficients, - approvals: self.approvals, - } - } -} - -/// A pair to act as a key for the approval storage map. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct ApprovalKey { - /// The owner of the funds that are being approved. - owner: AccountId, - /// The party to whom transfer of the funds is being delegated. - delegate: AccountId, -} - -/// Data concerning an approval. -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] -pub struct Approval { - /// The amount of funds approved for the balance transfer from the owner to some delegated - /// target. 
- amount: Balance, - /// The amount reserved on the owner's account to hold this item in storage. - deposit: DepositBalance, -} - -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] -pub struct AssetBalance { - /// The balance. - balance: Balance, - /// Whether the account is frozen. - is_frozen: bool, - /// `true` if this balance gave the account a self-sufficient reference. - sufficient: bool, -} - -#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] -pub struct AssetMetadata { - /// The balance deposited for this metadata. - /// - /// This pays for the data stored in this struct. - deposit: DepositBalance, - /// The user friendly name of this asset. Limited in length by `StringLimit`. - name: Vec, - /// The ticker symbol for this asset. Limited in length by `StringLimit`. - symbol: Vec, - /// The number of decimals this asset uses to represent one unit. - decimals: u8, - /// Whether the asset metadata may be changed by a non Force origin. - is_frozen: bool, -} - -/// Witness data for the destroy transactions. -#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub struct DestroyWitness { - /// The number of accounts holding the asset. - #[codec(compact)] - accounts: u32, - /// The number of accounts holding the asset with a self-sufficient reference. - #[codec(compact)] - sufficients: u32, - /// The number of transfer-approvals of the asset. - #[codec(compact)] - approvals: u32, -} - #[frame_support::pallet] pub mod pallet { use frame_support::{ @@ -295,13 +165,13 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] - pub struct Pallet(_); + pub struct Pallet(_); #[pallet::config] /// The module configuration trait. - pub trait Config: frame_system::Config { + pub trait Config: frame_system::Config { /// The overarching event type. - type Event: From> + IsType<::Event>; + type Event: From> + IsType<::Event>; /// The units in which we record balances. 
type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy; @@ -317,73 +187,84 @@ pub mod pallet { type ForceOrigin: EnsureOrigin; /// The basic amount of funds that must be reserved for an asset. - type AssetDeposit: Get>; + type AssetDeposit: Get>; /// The basic amount of funds that must be reserved when adding metadata to your asset. - type MetadataDepositBase: Get>; + type MetadataDepositBase: Get>; /// The additional funds that must be reserved for the number of bytes you store in your /// metadata. - type MetadataDepositPerByte: Get>; + type MetadataDepositPerByte: Get>; /// The amount of funds that must be reserved when creating a new approval. - type ApprovalDeposit: Get>; + type ApprovalDeposit: Get>; /// The maximum length of a name or symbol stored on-chain. type StringLimit: Get; + /// A hook to allow a per-asset, per-account minimum balance to be enforced. This must be + /// respected in all permissionless operations. + type Freezer: FrozenBalance; + + /// Additional data to be stored with an account's asset balance. + type Extra: Member + Parameter + Default; + /// Weight information for extrinsics in this pallet. type WeightInfo: WeightInfo; } #[pallet::storage] /// Details of an asset. - pub(super) type Asset = StorageMap< + pub(super) type Asset, I: 'static = ()> = StorageMap< _, Blake2_128Concat, T::AssetId, - AssetDetails>, + AssetDetails>, >; #[pallet::storage] /// The number of units of assets held by any given account. - pub(super) type Account = StorageDoubleMap< + pub(super) type Account, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, T::AssetId, Blake2_128Concat, T::AccountId, - AssetBalance, + AssetBalance, ValueQuery, >; #[pallet::storage] /// Approved balance transfers. First balance is the amount approved for transfer. Second /// is the amount of `T::Currency` reserved for storing this. 
- pub(super) type Approvals = StorageDoubleMap< + pub(super) type Approvals, I: 'static = ()> = StorageDoubleMap< _, Blake2_128Concat, T::AssetId, Blake2_128Concat, ApprovalKey, - Approval>, + Approval>, OptionQuery, >; #[pallet::storage] /// Metadata of an asset. - pub(super) type Metadata = StorageMap< + pub(super) type Metadata, I: 'static = ()> = StorageMap< _, Blake2_128Concat, T::AssetId, - AssetMetadata>, + AssetMetadata>, ValueQuery, >; #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] - #[pallet::metadata(T::AccountId = "AccountId", T::Balance = "Balance", T::AssetId = "AssetId")] - pub enum Event { + #[pallet::metadata( + T::AccountId = "AccountId", + T::Balance = "Balance", + T::AssetId = "AssetId" + )] + pub enum Event, I: 'static = ()> { /// Some asset class was created. \[asset_id, creator, owner\] Created(T::AssetId, T::AccountId, T::AccountId), /// Some assets were issued. \[asset_id, owner, total_supply\] @@ -428,7 +309,7 @@ pub mod pallet { } #[pallet::error] - pub enum Error { + pub enum Error { /// Account balance must be greater than or equal to the transfer amount. BalanceLow, /// Balance should be non-zero. @@ -458,10 +339,10 @@ pub mod pallet { } #[pallet::hooks] - impl Hooks> for Pallet {} + impl, I: 'static> Hooks> for Pallet {} #[pallet::call] - impl Pallet { + impl, I: 'static> Pallet { /// Issue a new class of fungible assets from a public origin. /// /// This new asset class has no assets initially and its owner is the origin. 
@@ -491,26 +372,29 @@ pub mod pallet { let owner = ensure_signed(origin)?; let admin = T::Lookup::lookup(admin)?; - ensure!(!Asset::::contains_key(id), Error::::InUse); - ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); let deposit = T::AssetDeposit::get(); T::Currency::reserve(&owner, deposit)?; - Asset::::insert(id, AssetDetails { - owner: owner.clone(), - issuer: admin.clone(), - admin: admin.clone(), - freezer: admin.clone(), - supply: Zero::zero(), - deposit, - min_balance, - is_sufficient: false, - accounts: 0, - sufficients: 0, - approvals: 0, - is_frozen: false, - }); + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: admin.clone(), + admin: admin.clone(), + freezer: admin.clone(), + supply: Zero::zero(), + deposit, + min_balance, + is_sufficient: false, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); Self::deposit_event(Event::Created(id, owner, admin)); Ok(()) } @@ -547,23 +431,26 @@ pub mod pallet { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; - ensure!(!Asset::::contains_key(id), Error::::InUse); - ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); - - Asset::::insert(id, AssetDetails { - owner: owner.clone(), - issuer: owner.clone(), - admin: owner.clone(), - freezer: owner.clone(), - supply: Zero::zero(), - deposit: Zero::zero(), - min_balance, - is_sufficient, - accounts: 0, - sufficients: 0, - approvals: 0, - is_frozen: false, - }); + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + min_balance, + is_sufficient, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, 
+ }, + ); Self::deposit_event(Event::ForceCreated(id, owner)); Ok(()) } @@ -596,25 +483,28 @@ pub mod pallet { Ok(_) => None, Err(origin) => Some(ensure_signed(origin)?), }; - Asset::::try_mutate_exists(id, |maybe_details| { - let mut details = maybe_details.take().ok_or(Error::::Unknown)?; + Asset::::try_mutate_exists(id, |maybe_details| { + let mut details = maybe_details.take().ok_or(Error::::Unknown)?; if let Some(check_owner) = maybe_check_owner { - ensure!(details.owner == check_owner, Error::::NoPermission); + ensure!(details.owner == check_owner, Error::::NoPermission); } - ensure!(details.accounts == witness.accounts, Error::::BadWitness); - ensure!(details.sufficients == witness.sufficients, Error::::BadWitness); - ensure!(details.approvals == witness.approvals, Error::::BadWitness); + ensure!(details.accounts == witness.accounts, Error::::BadWitness); + ensure!(details.sufficients == witness.sufficients, Error::::BadWitness); + ensure!(details.approvals == witness.approvals, Error::::BadWitness); - for (who, v) in Account::::drain_prefix(id) { - Self::dead_account(&who, &mut details, v.sufficient); + for (who, v) in Account::::drain_prefix(id) { + Self::dead_account(id, &who, &mut details, v.sufficient); } debug_assert_eq!(details.accounts, 0); debug_assert_eq!(details.sufficients, 0); - let metadata = Metadata::::take(&id); - T::Currency::unreserve(&details.owner, details.deposit.saturating_add(metadata.deposit)); + let metadata = Metadata::::take(&id); + T::Currency::unreserve( + &details.owner, + details.deposit.saturating_add(metadata.deposit), + ); - Approvals::::remove_prefix(&id); + Approvals::::remove_prefix(&id); Self::deposit_event(Event::Destroyed(id)); // NOTE: could use postinfo to reflect the actual number of accounts/sufficient/approvals @@ -643,7 +533,9 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; let beneficiary = T::Lookup::lookup(beneficiary)?; - Self::increase_balance(id, beneficiary, amount, 
Some(origin)) + Self::do_mint(id, &beneficiary, amount, Some(origin))?; + Self::deposit_event(Event::Issued(id, beneficiary, amount)); + Ok(()) } /// Reduce the balance of `who` by as much as possible up to `amount` assets of `id`. @@ -671,7 +563,10 @@ pub mod pallet { let origin = ensure_signed(origin)?; let who = T::Lookup::lookup(who)?; - Self::reduce_balance(id, who, amount, Some(origin)) + let f = DebitFlags { keep_alive: false, best_effort: true }; + let burned = Self::do_burn(id, &who, amount, Some(origin), f)?; + Self::deposit_event(Event::Burned(id, who, burned)); + Ok(()) } /// Move some assets from the sender account to another. @@ -702,7 +597,12 @@ pub mod pallet { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(target)?; - Self::do_transfer(id, origin, dest, amount, None, false) + let f = TransferFlags { + keep_alive: false, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &origin, &dest, amount, None, f).map(|_| ()) } /// Move some assets from the sender account to another, keeping the sender account alive. @@ -730,10 +630,15 @@ pub mod pallet { target: ::Source, #[pallet::compact] amount: T::Balance ) -> DispatchResult { - let origin = ensure_signed(origin)?; + let source = ensure_signed(origin)?; let dest = T::Lookup::lookup(target)?; - Self::do_transfer(id, origin, dest, amount, None, true) + let f = TransferFlags { + keep_alive: true, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &source, &dest, amount, None, f).map(|_| ()) } /// Move some assets from one account to another. @@ -767,7 +672,12 @@ pub mod pallet { let source = T::Lookup::lookup(source)?; let dest = T::Lookup::lookup(dest)?; - Self::do_transfer(id, source, dest, amount, Some(origin), false) + let f = TransferFlags { + keep_alive: false, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &source, &dest, amount, Some(origin), f).map(|_| ()) } /// Disallow further unprivileged transfers from an account. 
@@ -788,14 +698,17 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(&origin == &d.freezer, Error::::NoPermission); + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); let who = T::Lookup::lookup(who)?; - ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + ensure!( + Account::::contains_key(id, &who), + Error::::BalanceZero + ); - Account::::mutate(id, &who, |a| a.is_frozen = true); + Account::::mutate(id, &who, |a| a.is_frozen = true); - Self::deposit_event(Event::::Frozen(id, who)); + Self::deposit_event(Event::::Frozen(id, who)); Ok(()) } @@ -818,14 +731,17 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - let details = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(&origin == &details.admin, Error::::NoPermission); + let details = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &details.admin, Error::::NoPermission); let who = T::Lookup::lookup(who)?; - ensure!(Account::::contains_key(id, &who), Error::::BalanceZero); + ensure!( + Account::::contains_key(id, &who), + Error::::BalanceZero + ); - Account::::mutate(id, &who, |a| a.is_frozen = false); + Account::::mutate(id, &who, |a| a.is_frozen = false); - Self::deposit_event(Event::::Thawed(id, who)); + Self::deposit_event(Event::::Thawed(id, who)); Ok(()) } @@ -845,13 +761,13 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - Asset::::try_mutate(id, |maybe_details| { - let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(&origin == &d.freezer, Error::::NoPermission); + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.freezer, Error::::NoPermission); d.is_frozen = true; - Self::deposit_event(Event::::AssetFrozen(id)); + Self::deposit_event(Event::::AssetFrozen(id)); Ok(()) }) } 
@@ -872,13 +788,13 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - Asset::::try_mutate(id, |maybe_details| { - let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(&origin == &d.admin, Error::::NoPermission); + Asset::::try_mutate(id, |maybe_details| { + let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &d.admin, Error::::NoPermission); d.is_frozen = false; - Self::deposit_event(Event::::AssetThawed(id)); + Self::deposit_event(Event::::AssetThawed(id)); Ok(()) }) } @@ -902,12 +818,14 @@ pub mod pallet { let origin = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; - Asset::::try_mutate(id, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(&origin == &details.owner, Error::::NoPermission); - if details.owner == owner { return Ok(()) } + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); + if details.owner == owner { + return Ok(()); + } - let metadata_deposit = Metadata::::get(id).deposit; + let metadata_deposit = Metadata::::get(id).deposit; let deposit = details.deposit + metadata_deposit; // Move the deposit to the new owner. 
@@ -945,9 +863,9 @@ pub mod pallet { let admin = T::Lookup::lookup(admin)?; let freezer = T::Lookup::lookup(freezer)?; - Asset::::try_mutate(id, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(&origin == &details.owner, Error::::NoPermission); + Asset::::try_mutate(id, |maybe_details| { + let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; + ensure!(&origin == &details.owner, Error::::NoPermission); details.issuer = issuer.clone(); details.admin = admin.clone(); @@ -984,14 +902,17 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - ensure!(name.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); - ensure!(symbol.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); + ensure!(name.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); + ensure!(symbol.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(&origin == &d.owner, Error::::NoPermission); + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.owner, Error::::NoPermission); - Metadata::::try_mutate_exists(id, |metadata| { - ensure!(metadata.as_ref().map_or(true, |m| !m.is_frozen), Error::::NoPermission); + Metadata::::try_mutate_exists(id, |metadata| { + ensure!( + metadata.as_ref().map_or(true, |m| !m.is_frozen), + Error::::NoPermission + ); let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); let new_deposit = T::MetadataDepositPerByte::get() @@ -1035,11 +956,11 @@ pub mod pallet { ) -> DispatchResult { let origin = ensure_signed(origin)?; - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(&origin == &d.owner, Error::::NoPermission); + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.owner, Error::::NoPermission); - Metadata::::try_mutate_exists(id, |metadata| { - let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; + 
Metadata::::try_mutate_exists(id, |metadata| { + let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&d.owner, deposit); Self::deposit_event(Event::MetadataCleared(id)); Ok(()) @@ -1071,11 +992,11 @@ pub mod pallet { ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; - ensure!(name.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); - ensure!(symbol.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); + ensure!(name.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); + ensure!(symbol.len() <= T::StringLimit::get() as usize, Error::::BadMetadata); - ensure!(Asset::::contains_key(id), Error::::Unknown); - Metadata::::try_mutate_exists(id, |metadata| { + ensure!(Asset::::contains_key(id), Error::::Unknown); + Metadata::::try_mutate_exists(id, |metadata| { let deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); *metadata = Some(AssetMetadata { deposit, @@ -1108,9 +1029,9 @@ pub mod pallet { ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - Metadata::::try_mutate_exists(id, |metadata| { - let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + Metadata::::try_mutate_exists(id, |metadata| { + let deposit = metadata.take().ok_or(Error::::Unknown)?.deposit; T::Currency::unreserve(&d.owner, deposit); Self::deposit_event(Event::MetadataCleared(id)); Ok(()) @@ -1153,8 +1074,8 @@ pub mod pallet { ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; - Asset::::try_mutate(id, |maybe_asset| { - let mut asset = maybe_asset.take().ok_or(Error::::Unknown)?; + Asset::::try_mutate(id, |maybe_asset| { + let mut asset = maybe_asset.take().ok_or(Error::::Unknown)?; asset.owner = T::Lookup::lookup(owner)?; asset.issuer = T::Lookup::lookup(issuer)?; asset.admin = T::Lookup::lookup(admin)?; @@ -1200,7 +1121,7 @@ pub mod pallet { let delegate = 
T::Lookup::lookup(delegate)?; let key = ApprovalKey { owner, delegate }; - Approvals::::try_mutate(id, &key, |maybe_approved| -> DispatchResult { + Approvals::::try_mutate(id, &key, |maybe_approved| -> DispatchResult { let mut approved = maybe_approved.take().unwrap_or_default(); let deposit_required = T::ApprovalDeposit::get(); if approved.deposit < deposit_required { @@ -1238,7 +1159,7 @@ pub mod pallet { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; let key = ApprovalKey { owner, delegate }; - let approval = Approvals::::take(id, &key).ok_or(Error::::Unknown)?; + let approval = Approvals::::take(id, &key).ok_or(Error::::Unknown)?; T::Currency::unreserve(&key.owner, approval.deposit); Self::deposit_event(Event::ApprovalCancelled(id, key.owner, key.delegate)); @@ -1269,8 +1190,8 @@ pub mod pallet { .map(|_| ()) .or_else(|origin| -> DispatchResult { let origin = ensure_signed(origin)?; - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(&origin == &d.admin, Error::::NoPermission); + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(&origin == &d.admin, Error::::NoPermission); Ok(()) })?; @@ -1278,7 +1199,7 @@ pub mod pallet { let delegate = T::Lookup::lookup(delegate)?; let key = ApprovalKey { owner, delegate }; - let approval = Approvals::::take(id, &key).ok_or(Error::::Unknown)?; + let approval = Approvals::::take(id, &key).ok_or(Error::::Unknown)?; T::Currency::unreserve(&key.owner, approval.deposit); Self::deposit_event(Event::ApprovalCancelled(id, key.owner, key.delegate)); @@ -1316,11 +1237,19 @@ pub mod pallet { let destination = T::Lookup::lookup(destination)?; let key = ApprovalKey { owner, delegate }; - Approvals::::try_mutate_exists(id, &key, |maybe_approved| -> DispatchResult { - let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; - let remaining = approved.amount.checked_sub(&amount).ok_or(Error::::Unapproved)?; - - Self::do_transfer(id, key.owner.clone(), destination, amount, 
None, false)?; + Approvals::::try_mutate_exists(id, &key, |maybe_approved| -> DispatchResult { + let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; + let remaining = approved + .amount + .checked_sub(&amount) + .ok_or(Error::::Unapproved)?; + + let f = TransferFlags { + keep_alive: false, + best_effort: false, + burn_dust: false + }; + Self::do_transfer(id, &key.owner, &destination, amount, None, f)?; if remaining.is_zero() { T::Currency::unreserve(&key.owner, approved.deposit); @@ -1334,186 +1263,3 @@ pub mod pallet { } } } - -// The main implementation block for the module. -impl Pallet { - // Public immutables - - /// Get the asset `id` balance of `who`. - pub fn balance(id: T::AssetId, who: impl sp_std::borrow::Borrow) -> T::Balance { - Account::::get(id, who.borrow()).balance - } - - /// Get the total supply of an asset `id`. - pub fn total_supply(id: T::AssetId) -> T::Balance { - Asset::::get(id).map(|x| x.supply).unwrap_or_else(Zero::zero) - } - - fn new_account( - who: &T::AccountId, - d: &mut AssetDetails>, - ) -> Result { - let accounts = d.accounts.checked_add(1).ok_or(Error::::Overflow)?; - let is_sufficient = if d.is_sufficient { - frame_system::Pallet::::inc_sufficients(who); - d.sufficients += 1; - true - } else { - frame_system::Pallet::::inc_consumers(who).map_err(|_| Error::::NoProvider)?; - false - }; - d.accounts = accounts; - Ok(is_sufficient) - } - - fn dead_account( - who: &T::AccountId, - d: &mut AssetDetails>, - sufficient: bool, - ) { - if sufficient { - d.sufficients = d.sufficients.saturating_sub(1); - frame_system::Pallet::::dec_sufficients(who); - } else { - frame_system::Pallet::::dec_consumers(who); - } - d.accounts = d.accounts.saturating_sub(1); - } - - fn can_deposit(id: T::AssetId, who: &T::AccountId, amount: T::Balance) -> bool { - let details = match Asset::::get(id) { - Some(details) => details, - None => return false, - }; - if details.supply.checked_add(&amount).is_none() { return false } - let account = 
Account::::get(id, who); - if account.balance.checked_add(&amount).is_none() { return false } - if account.balance.is_zero() { - if amount < details.min_balance { return false } - if !details.is_sufficient && frame_system::Pallet::::providers(who) == 0 { return false } - if details.is_sufficient && details.sufficients.checked_add(1).is_none() { return false } - } - - true - } - - fn increase_balance( - id: T::AssetId, - beneficiary: T::AccountId, - amount: T::Balance, - maybe_check_issuer: Option, - ) -> DispatchResult { - Asset::::try_mutate(id, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - - if let Some(check_issuer) = maybe_check_issuer { - ensure!(&check_issuer == &details.issuer, Error::::NoPermission); - } - details.supply = details.supply.checked_add(&amount).ok_or(Error::::Overflow)?; - - Account::::try_mutate(id, &beneficiary, |t| -> DispatchResult { - let new_balance = t.balance.saturating_add(amount); - ensure!(new_balance >= details.min_balance, Error::::BalanceLow); - if t.balance.is_zero() { - t.sufficient = Self::new_account(&beneficiary, details)?; - } - t.balance = new_balance; - Ok(()) - })?; - Self::deposit_event(Event::Issued(id, beneficiary, amount)); - Ok(()) - }) - } - - fn reduce_balance( - id: T::AssetId, - target: T::AccountId, - amount: T::Balance, - maybe_check_admin: Option, - ) -> DispatchResult { - Asset::::try_mutate(id, |maybe_details| { - let d = maybe_details.as_mut().ok_or(Error::::Unknown)?; - if let Some(check_admin) = maybe_check_admin { - ensure!(&check_admin == &d.admin, Error::::NoPermission); - } - - let burned = Account::::try_mutate_exists( - id, - &target, - |maybe_account| -> Result { - let mut account = maybe_account.take().ok_or(Error::::BalanceZero)?; - let mut burned = amount.min(account.balance); - account.balance -= burned; - *maybe_account = if account.balance < d.min_balance { - burned += account.balance; - Self::dead_account(&target, d, account.sufficient); - None - } 
else { - Some(account) - }; - Ok(burned) - } - )?; - - d.supply = d.supply.saturating_sub(burned); - - Self::deposit_event(Event::Burned(id, target, burned)); - Ok(()) - }) - } - - fn do_transfer( - id: T::AssetId, - source: T::AccountId, - dest: T::AccountId, - amount: T::Balance, - maybe_need_admin: Option, - keep_alive: bool, - ) -> DispatchResult { - let mut source_account = Account::::get(id, &source); - ensure!(!source_account.is_frozen, Error::::Frozen); - - source_account.balance = source_account.balance.checked_sub(&amount) - .ok_or(Error::::BalanceLow)?; - - Asset::::try_mutate(id, |maybe_details| { - let details = maybe_details.as_mut().ok_or(Error::::Unknown)?; - ensure!(!details.is_frozen, Error::::Frozen); - - if let Some(need_admin) = maybe_need_admin { - ensure!(&need_admin == &details.admin, Error::::NoPermission); - } - - if dest != source && !amount.is_zero() { - let mut amount = amount; - if source_account.balance < details.min_balance { - ensure!(!keep_alive, Error::::WouldDie); - amount += source_account.balance; - source_account.balance = Zero::zero(); - } - - Account::::try_mutate(id, &dest, |a| -> DispatchResult { - let new_balance = a.balance.saturating_add(amount); - - ensure!(new_balance >= details.min_balance, Error::::BalanceLow); - - if a.balance.is_zero() { - a.sufficient = Self::new_account(&dest, details)?; - } - a.balance = new_balance; - Ok(()) - })?; - - if source_account.balance.is_zero() { - Self::dead_account(&source, details, source_account.sufficient); - Account::::remove(id, &source); - } else { - Account::::insert(id, &source, &source_account) - } - } - - Self::deposit_event(Event::Transferred(id, source, dest, amount)); - Ok(()) - }) - } -} diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 806d85ce71947..0b7aa339835ec 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -65,6 +65,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type 
SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -100,7 +101,42 @@ impl Config for Test { type MetadataDepositPerByte = MetadataDepositPerByte; type ApprovalDeposit = ApprovalDeposit; type StringLimit = StringLimit; + type Freezer = TestFreezer; type WeightInfo = (); + type Extra = (); +} + +use std::cell::RefCell; +use std::collections::HashMap; + +#[derive(Copy, Clone, Eq, PartialEq, Debug)] +pub(crate) enum Hook { + Died(u32, u64), +} +thread_local! { + static FROZEN: RefCell> = RefCell::new(Default::default()); + static HOOKS: RefCell> = RefCell::new(Default::default()); +} + +pub struct TestFreezer; +impl FrozenBalance for TestFreezer { + fn frozen_balance(asset: u32, who: &u64) -> Option { + FROZEN.with(|f| f.borrow().get(&(asset, who.clone())).cloned()) + } + + fn died(asset: u32, who: &u64) { + HOOKS.with(|h| h.borrow_mut().push(Hook::Died(asset, who.clone()))); + } +} + +pub(crate) fn set_frozen_balance(asset: u32, who: u64, amount: u64) { + FROZEN.with(|f| f.borrow_mut().insert((asset, who), amount)); +} +pub(crate) fn clear_frozen_balance(asset: u32, who: u64) { + FROZEN.with(|f| f.borrow_mut().remove(&(asset, who))); +} +pub(crate) fn hooks() -> Vec { + HOOKS.with(|h| h.borrow().clone()) } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index 1fe9358dcbff7..f4976af023627 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -19,6 +19,7 @@ use super::*; use crate::{Error, mock::*}; +use sp_runtime::TokenError; use frame_support::{assert_ok, assert_noop, traits::Currency}; use pallet_balances::Error as BalancesError; @@ -198,11 +199,11 @@ fn non_providing_should_work() { assert_ok!(Assets::mint(Origin::signed(1), 0, 0, 100)); // Cannot mint into account 2 since it doesn't (yet) exist... 
- assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), Error::::NoProvider); + assert_noop!(Assets::mint(Origin::signed(1), 0, 1, 100), TokenError::CannotCreate); // ...or transfer... - assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), Error::::NoProvider); + assert_noop!(Assets::transfer(Origin::signed(0), 0, 1, 50), TokenError::CannotCreate); // ...or force-transfer - assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), Error::::NoProvider); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 0, 1, 50), TokenError::CannotCreate); Balances::make_free_balance_be(&1, 100); Balances::make_free_balance_be(&2, 100); @@ -219,12 +220,11 @@ fn min_balance_should_work() { assert_eq!(Asset::::get(0).unwrap().accounts, 1); // Cannot create a new account with a balance that is below minimum... - assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), Error::::BalanceLow); - assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), Error::::BalanceLow); + assert_noop!(Assets::mint(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 9), TokenError::BelowMinimum); + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 9), TokenError::BelowMinimum); // When deducting from an account to below minimum, it should be reaped. 
- assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 91)); assert!(Assets::balance(0, 1).is_zero()); assert_eq!(Assets::balance(0, 2), 100); @@ -277,7 +277,7 @@ fn transferring_enough_to_kill_source_when_keep_alive_should_fail() { assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), Error::::WouldDie); + assert_noop!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 91), Error::::BalanceLow); assert_ok!(Assets::transfer_keep_alive(Origin::signed(1), 0, 2, 90)); assert_eq!(Assets::balance(0, 1), 10); assert_eq!(Assets::balance(0, 2), 90); @@ -430,12 +430,14 @@ fn burning_asset_balance_with_positive_balance_should_work() { } #[test] -fn burning_asset_balance_with_zero_balance_should_not_work() { +fn burning_asset_balance_with_zero_balance_does_nothing() { new_test_ext().execute_with(|| { assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value()), Error::::BalanceZero); + assert_ok!(Assets::burn(Origin::signed(1), 0, 2, u64::max_value())); + assert_eq!(Assets::balance(0, 2), 0); + assert_eq!(Assets::total_supply(0), 100); }); } @@ -490,4 +492,143 @@ fn set_metadata_should_work() { }); } -// TODO: tests for force_set_metadata, force_clear_metadata, force_asset_status +#[test] +fn freezer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 10)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + assert_eq!(Assets::balance(0, 1), 100); + + + // freeze 50 of it. 
+ set_frozen_balance(0, 1, 50); + + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 20)); + // cannot transfer another 21 away as this would take the non-frozen balance (30) to below + // the minimum balance (10). + assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 21), Error::::BalanceLow); + + // create an approved transfer... + Balances::make_free_balance_be(&1, 100); + assert_ok!(Assets::approve_transfer(Origin::signed(1), 0, 2, 50)); + let e = Error::::BalanceLow; + // ...but that wont work either: + assert_noop!(Assets::transfer_approved(Origin::signed(2), 0, 1, 2, 21), e); + // a force transfer won't work also. + let e = Error::::BalanceLow; + assert_noop!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21), e); + + // reduce it to only 49 frozen... + set_frozen_balance(0, 1, 49); + // ...and it's all good: + assert_ok!(Assets::force_transfer(Origin::signed(1), 0, 1, 2, 21)); + + // and if we clear it, we can remove the account completely. + clear_frozen_balance(0, 1); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(hooks(), vec![Hook::Died(0, 1)]); + }); +} + +#[test] +fn imbalances_should_work() { + use frame_support::traits::tokens::fungibles::Balanced; + + new_test_ext().execute_with(|| { + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + + let imb = Assets::issue(0, 100); + assert_eq!(Assets::total_supply(0), 100); + assert_eq!(imb.peek(), 100); + + let (imb1, imb2) = imb.split(30); + assert_eq!(imb1.peek(), 30); + assert_eq!(imb2.peek(), 70); + + drop(imb2); + assert_eq!(Assets::total_supply(0), 30); + + assert!(Assets::resolve(&1, imb1).is_ok()); + assert_eq!(Assets::balance(0, 1), 30); + assert_eq!(Assets::total_supply(0), 30); + }); +} + +#[test] +fn force_metadata_should_work() { + new_test_ext().execute_with(|| { + //force set metadata works + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_set_metadata(Origin::root(), 0, vec![0u8; 10], vec![0u8; 
10], 8, false)); + assert!(Metadata::::contains_key(0)); + + //overwrites existing metadata + let asset_original_metadata = Metadata::::get(0); + assert_ok!(Assets::force_set_metadata(Origin::root(), 0, vec![1u8; 10], vec![1u8; 10], 8, false)); + assert_ne!(Metadata::::get(0), asset_original_metadata); + + //attempt to set metadata for non-existent asset class + assert_noop!( + Assets::force_set_metadata(Origin::root(), 1, vec![0u8; 10], vec![0u8; 10], 8, false), + Error::::Unknown + ); + + //string length limit check + let limit = StringLimit::get() as usize; + assert_noop!( + Assets::force_set_metadata(Origin::root(), 0, vec![0u8; limit + 1], vec![0u8; 10], 8, false), + Error::::BadMetadata + ); + assert_noop!( + Assets::force_set_metadata(Origin::root(), 0, vec![0u8; 10], vec![0u8; limit + 1], 8, false), + Error::::BadMetadata + ); + + //force clear metadata works + assert!(Metadata::::contains_key(0)); + assert_ok!(Assets::force_clear_metadata(Origin::root(), 0)); + assert!(!Metadata::::contains_key(0)); + + //Error handles clearing non-existent asset class + assert_noop!(Assets::force_clear_metadata(Origin::root(), 1), Error::::Unknown); + }); +} + +#[test] +fn force_asset_status_should_work(){ + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&1, 10); + Balances::make_free_balance_be(&2, 10); + assert_ok!(Assets::create(Origin::signed(1), 0, 1, 30)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 50)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 2, 150)); + + //force asset status to change min_balance > balance + assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 100, true, false)); + assert_eq!(Assets::balance(0, 1), 50); + + //account can recieve assets for balance < min_balance + assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 1)); + assert_eq!(Assets::balance(0, 1), 51); + + //account on outbound transfer will cleanup for balance < min_balance + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 1)); + 
assert_eq!(Assets::balance(0,1), 0); + + //won't create new account with balance below min_balance + assert_noop!(Assets::transfer(Origin::signed(2), 0, 3, 50), TokenError::BelowMinimum); + + //force asset status will not execute for non-existent class + assert_noop!( + Assets::force_asset_status(Origin::root(), 1, 1, 1, 1, 1, 90, true, false), + Error::::Unknown + ); + + //account drains to completion when funds dip below min_balance + assert_ok!(Assets::force_asset_status(Origin::root(), 0, 1, 1, 1, 1, 110, true, false)); + assert_ok!(Assets::transfer(Origin::signed(2), 0, 1, 110)); + assert_eq!(Assets::balance(0, 1), 200); + assert_eq!(Assets::balance(0, 2), 0); + assert_eq!(Assets::total_supply(0), 200); + }); +} diff --git a/frame/assets/src/types.rs b/frame/assets/src/types.rs new file mode 100644 index 0000000000000..f3f17c00a218f --- /dev/null +++ b/frame/assets/src/types.rs @@ -0,0 +1,187 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Various basic types for use in the assets pallet. + +use super::*; + +pub(super) type DepositBalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct AssetDetails< + Balance, + AccountId, + DepositBalance, +> { + /// Can change `owner`, `issuer`, `freezer` and `admin` accounts. 
+ pub(super) owner: AccountId, + /// Can mint tokens. + pub(super) issuer: AccountId, + /// Can thaw tokens, force transfers and burn tokens from any account. + pub(super) admin: AccountId, + /// Can freeze tokens. + pub(super) freezer: AccountId, + /// The total supply across all accounts. + pub(super) supply: Balance, + /// The balance deposited for this asset. This pays for the data stored here. + pub(super) deposit: DepositBalance, + /// The ED for virtual accounts. + pub(super) min_balance: Balance, + /// If `true`, then any account with this asset is given a provider reference. Otherwise, it + /// requires a consumer reference. + pub(super) is_sufficient: bool, + /// The total number of accounts. + pub(super) accounts: u32, + /// The total number of accounts for which we have placed a self-sufficient reference. + pub(super) sufficients: u32, + /// The total number of approvals. + pub(super) approvals: u32, + /// Whether the asset is frozen for non-admin transfers. + pub(super) is_frozen: bool, +} + +impl AssetDetails { + pub fn destroy_witness(&self) -> DestroyWitness { + DestroyWitness { + accounts: self.accounts, + sufficients: self.sufficients, + approvals: self.approvals, + } + } +} + +/// A pair to act as a key for the approval storage map. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct ApprovalKey { + /// The owner of the funds that are being approved. + pub(super) owner: AccountId, + /// The party to whom transfer of the funds is being delegated. + pub(super) delegate: AccountId, +} + +/// Data concerning an approval. +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct Approval { + /// The amount of funds approved for the balance transfer from the owner to some delegated + /// target. + pub(super) amount: Balance, + /// The amount reserved on the owner's account to hold this item in storage. 
+ pub(super) deposit: DepositBalance, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetBalance { + /// The balance. + pub(super) balance: Balance, + /// Whether the account is frozen. + pub(super) is_frozen: bool, + /// `true` if this balance gave the account a self-sufficient reference. + pub(super) sufficient: bool, + /// Additional "sidecar" data, in case some other pallet wants to use this storage item. + pub(super) extra: Extra, +} + +#[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, Default)] +pub struct AssetMetadata { + /// The balance deposited for this metadata. + /// + /// This pays for the data stored in this struct. + pub(super) deposit: DepositBalance, + /// The user friendly name of this asset. Limited in length by `StringLimit`. + pub(super) name: Vec, + /// The ticker symbol for this asset. Limited in length by `StringLimit`. + pub(super) symbol: Vec, + /// The number of decimals this asset uses to represent one unit. + pub(super) decimals: u8, + /// Whether the asset metadata may be changed by a non Force origin. + pub(super) is_frozen: bool, +} + +/// Witness data for the destroy transactions. +#[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] +pub struct DestroyWitness { + /// The number of accounts holding the asset. + #[codec(compact)] + pub(super) accounts: u32, + /// The number of accounts holding the asset with a self-sufficient reference. + #[codec(compact)] + pub(super) sufficients: u32, + /// The number of transfer-approvals of the asset. + #[codec(compact)] + pub(super) approvals: u32, +} + +/// Trait for allowing a minimum balance on the account to be specified, beyond the +/// `minimum_balance` of the asset. This is additive - the `minimum_balance` of the asset must be +/// met *and then* anything here in addition. +pub trait FrozenBalance { + /// Return the frozen balance. Under normal behaviour, this amount should always be + /// withdrawable. 
+ /// + /// In reality, the balance of every account must be at least the sum of this (if `Some`) and + /// the asset's minimum_balance, since there may be complications to destroying an asset's + /// account completely. + /// + /// If `None` is returned, then nothing special is enforced. + /// + /// If any operation ever breaks this requirement (which will only happen through some sort of + /// privileged intervention), then `melted` is called to do any cleanup. + fn frozen_balance(asset: AssetId, who: &AccountId) -> Option; + + /// Called when an account has been removed. + fn died(asset: AssetId, who: &AccountId); +} + +impl FrozenBalance for () { + fn frozen_balance(_: AssetId, _: &AccountId) -> Option { None } + fn died(_: AssetId, _: &AccountId) {} +} + +#[derive(Copy, Clone, PartialEq, Eq)] +pub(super) struct TransferFlags { + /// The debited account must stay alive at the end of the operation; an error is returned if + /// this cannot be achieved legally. + pub(super) keep_alive: bool, + /// Less than the amount specified needs be debited by the operation for it to be considered + /// successful. If `false`, then the amount debited will always be at least the amount + /// specified. + pub(super) best_effort: bool, + /// Any additional funds debited (due to minimum balance requirements) should be burned rather + /// than credited to the destination account. + pub(super) burn_dust: bool, +} + +#[derive(Copy, Clone, PartialEq, Eq)] +pub(super) struct DebitFlags { + /// The debited account must stay alive at the end of the operation; an error is returned if + /// this cannot be achieved legally. + pub(super) keep_alive: bool, + /// Less than the amount specified needs be debited by the operation for it to be considered + /// successful. If `false`, then the amount debited will always be at least the amount + /// specified. 
+ pub(super) best_effort: bool, +} + +impl From for DebitFlags { + fn from(f: TransferFlags) -> Self { + Self { + keep_alive: f.keep_alive, + best_effort: f.best_effort, + } + } +} diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index a3b62d65e56a3..4fd1284893f99 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -28,7 +27,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "frame-support/std", "frame-system/std", diff --git a/frame/atomic-swap/src/lib.rs b/frame/atomic-swap/src/lib.rs index 536a452c115dc..513a9343a72e1 100644 --- a/frame/atomic-swap/src/lib.rs +++ b/frame/atomic-swap/src/lib.rs @@ -17,15 +17,15 @@ //! # Atomic Swap //! -//! A module for atomically sending funds. +//! A pallet for atomically sending funds. //! -//! - [`atomic_swap::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! ## Overview //! -//! A module for atomically sending funds from an origin to a target. A proof +//! A pallet for atomically sending funds from an origin to a target. A proof //! is used to allow the target to approve (claim) the swap. If the swap is not //! claimed within a specified duration of time, the sender may cancel it. //! @@ -33,9 +33,9 @@ //! //! ### Dispatchable Functions //! -//! * `create_swap` - called by a sender to register a new atomic swap -//! * `claim_swap` - called by the target to approve a swap -//! 
* `cancel_swap` - may be called by a sender after a specified duration +//! * [`create_swap`](Call::create_swap) - called by a sender to register a new atomic swap +//! * [`claim_swap`](Call::claim_swap) - called by the target to approve a swap +//! * [`cancel_swap`](Call::cancel_swap) - may be called by a sender after a specified duration // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -45,17 +45,16 @@ mod tests; use sp_std::{prelude::*, marker::PhantomData, ops::{Deref, DerefMut}}; use sp_io::hashing::blake2_256; use frame_support::{ - Parameter, decl_module, decl_storage, decl_event, decl_error, ensure, + RuntimeDebugNoBound, traits::{Get, Currency, ReservableCurrency, BalanceStatus}, weights::Weight, dispatch::DispatchResult, }; -use frame_system::{self as system, ensure_signed}; use codec::{Encode, Decode}; use sp_runtime::RuntimeDebug; /// Pending atomic swap operation. -#[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] +#[derive(Clone, Eq, PartialEq, RuntimeDebugNoBound, Encode, Decode)] pub struct PendingSwap { /// Source of the swap. pub source: T::AccountId, @@ -135,35 +134,50 @@ impl SwapAction for BalanceSwapAction> + Into<::Event>; - /// Swap action. - type SwapAction: SwapAction + Parameter; - /// Limit of proof size. - /// - /// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs - /// on-chain. If A is the one that generates the proof, then it requires that either: - /// - A's blockchain has the same proof length limit as B's blockchain. - /// - Or A's blockchain has shorter proof length limit as B's blockchain. - /// - /// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse - /// to accept the atomic swap request if A generates the proof, and asks that B generates the - /// proof instead. - type ProofLimit: Get; -} - -decl_storage! 
{ - trait Store for Module as AtomicSwap { - pub PendingSwaps: double_map - hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) HashedProof - => Option>; +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + /// Atomic swap's pallet configuration trait. + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From> + IsType<::Event>; + /// Swap action. + type SwapAction: SwapAction + Parameter; + /// Limit of proof size. + /// + /// Atomic swap is only atomic if once the proof is revealed, both parties can submit the proofs + /// on-chain. If A is the one that generates the proof, then it requires that either: + /// - A's blockchain has the same proof length limit as B's blockchain. + /// - Or A's blockchain has shorter proof length limit as B's blockchain. + /// + /// If B sees A is on a blockchain with larger proof length limit, then it should kindly refuse + /// to accept the atomic swap request if A generates the proof, and asks that B generates the + /// proof instead. + type ProofLimit: Get; } -} -decl_error! { - pub enum Error for Module { + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); + + #[pallet::storage] + pub type PendingSwaps = StorageDoubleMap<_, + Twox64Concat, T::AccountId, + Blake2_128Concat, HashedProof, + PendingSwap, + >; + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::error] + pub enum Error { /// Swap already exists. AlreadyExist, /// Swap proof is invalid. @@ -181,31 +195,27 @@ decl_error! { /// Duration has not yet passed for the swap to be cancelled. DurationNotPassed, } -} -decl_event!( /// Event of atomic swap pallet. 
- pub enum Event where - AccountId = ::AccountId, - PendingSwap = PendingSwap, - { + #[pallet::event] + #[pallet::metadata(T::AccountId = "AccountId", PendingSwap = "PendingSwap")] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { /// Swap created. \[account, proof, swap\] - NewSwap(AccountId, HashedProof, PendingSwap), + NewSwap(T::AccountId, HashedProof, PendingSwap), /// Swap claimed. The last parameter indicates whether the execution succeeds. /// \[account, proof, success\] - SwapClaimed(AccountId, HashedProof, bool), + SwapClaimed(T::AccountId, HashedProof, bool), /// Swap cancelled. \[account, proof\] - SwapCancelled(AccountId, HashedProof), + SwapCancelled(T::AccountId, HashedProof), } -); - -decl_module! { - /// Module definition of atomic swap pallet. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - fn deposit_event() = default; + /// Old name generated by `decl_event`. + #[deprecated(note="use `Event` instead")] + pub type RawEvent = Event; + #[pallet::call] + impl Pallet { /// Register a new atomic swap, declaring an intention to send funds from origin to target /// on the current blockchain. The target can claim the fund using the revealed proof. If /// the fund is not claimed after `duration` blocks, then the sender can cancel the swap. @@ -218,14 +228,14 @@ decl_module! { /// - `duration`: Locked duration of the atomic swap. For safety reasons, it is recommended /// that the revealer uses a shorter duration than the counterparty, to prevent the /// situation where the revealer reveals the proof too late around the end block. 
- #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] - fn create_swap( - origin, + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + pub(crate) fn create_swap( + origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, action: T::SwapAction, duration: T::BlockNumber, - ) { + ) -> DispatchResult { let source = ensure_signed(origin)?; ensure!( !PendingSwaps::::contains_key(&target, hashed_proof), @@ -242,8 +252,10 @@ decl_module! { PendingSwaps::::insert(target.clone(), hashed_proof.clone(), swap.clone()); Self::deposit_event( - RawEvent::NewSwap(target, hashed_proof, swap) + Event::NewSwap(target, hashed_proof, swap) ); + + Ok(()) } /// Claim an atomic swap. @@ -253,13 +265,14 @@ decl_module! { /// - `proof`: Revealed proof of the claim. /// - `action`: Action defined in the swap, it must match the entry in blockchain. Otherwise /// the operation fails. This is used for weight calculation. - #[weight = T::DbWeight::get().reads_writes(1, 1) - .saturating_add(40_000_000) - .saturating_add((proof.len() as Weight).saturating_mul(100)) - .saturating_add(action.weight()) - ] - fn claim_swap( - origin, + #[pallet::weight( + T::DbWeight::get().reads_writes(1, 1) + .saturating_add(40_000_000) + .saturating_add((proof.len() as Weight).saturating_mul(100)) + .saturating_add(action.weight()) + )] + pub(crate) fn claim_swap( + origin: OriginFor, proof: Vec, action: T::SwapAction, ) -> DispatchResult { @@ -280,7 +293,7 @@ decl_module! { PendingSwaps::::remove(target.clone(), hashed_proof.clone()); Self::deposit_event( - RawEvent::SwapClaimed(target, hashed_proof, succeeded) + Event::SwapClaimed(target, hashed_proof, succeeded) ); Ok(()) @@ -292,12 +305,12 @@ decl_module! { /// /// - `target`: Target of the original atomic swap. /// - `hashed_proof`: Hashed proof of the original atomic swap. 
- #[weight = T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000)] - fn cancel_swap( - origin, + #[pallet::weight(T::DbWeight::get().reads_writes(1, 1).saturating_add(40_000_000))] + pub(crate) fn cancel_swap( + origin: OriginFor, target: T::AccountId, hashed_proof: HashedProof, - ) { + ) -> DispatchResult { let source = ensure_signed(origin)?; let swap = PendingSwaps::::get(&target, hashed_proof) @@ -315,8 +328,10 @@ decl_module! { PendingSwaps::::remove(&target, hashed_proof.clone()); Self::deposit_event( - RawEvent::SwapCancelled(target, hashed_proof) + Event::SwapCancelled(target, hashed_proof) ); + + Ok(()) } } } diff --git a/frame/atomic-swap/src/tests.rs b/frame/atomic-swap/src/tests.rs index baa9a08957d4a..cc2849f5bd2c0 100644 --- a/frame/atomic-swap/src/tests.rs +++ b/frame/atomic-swap/src/tests.rs @@ -53,6 +53,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 6cae6c94c9a89..5b247b008de2f 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } -serde = { version = "1.0.101", optional = true } pallet-session = { version = "3.0.0", default-features = false, path = "../session" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } @@ -36,7 +35,6 @@ std = [ "sp-application-crypto/std", "codec/std", "sp-std/std", - "serde", "sp-runtime/std", "frame-support/std", "sp-consensus-aura/std", diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 481edbaff487f..26d5a2754974f 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -70,6 +70,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_timestamp::Config for Test { diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 85844cf716f03..25fec9118230e 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -17,7 +17,6 @@ sp-authority-discovery = { version = "3.0.0", default-features = false, path = " sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = 
false, path = "../../primitives/std" } -serde = { version = "1.0.101", optional = true } pallet-session = { version = "3.0.0", features = ["historical" ], path = "../session", default-features = false } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } @@ -35,7 +34,6 @@ std = [ "sp-authority-discovery/std", "codec/std", "sp-std/std", - "serde", "pallet-session/std", "sp-runtime/std", "frame-support/std", diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index ca8f3eeff3d68..6b7608b10c3bd 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -15,45 +15,87 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Authority discovery module. +//! # Authority discovery pallet. //! -//! This module is used by the `client/authority-discovery` and by polkadot's parachain logic +//! This pallet is used by the `client/authority-discovery` and by polkadot's parachain logic //! to retrieve the current and the next set of authorities. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, traits::OneSessionHandler}; +use frame_support::traits::OneSessionHandler; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; use sp_authority_discovery::AuthorityId; -/// The module's config trait. -pub trait Config: frame_system::Config + pallet_session::Config {} +pub use pallet::*; -decl_storage! { - trait Store for Module as AuthorityDiscovery { - /// Keys of the current authority set. - Keys get(fn keys): Vec; - /// Keys of the next authority set. 
- NextKeys get(fn next_keys): Vec; - } - add_extra_genesis { - config(keys): Vec; - build(|config| Module::::initialize_keys(&config.keys)) +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] + /// The pallet's config trait. + pub trait Config: frame_system::Config + pallet_session::Config {} + + #[pallet::storage] + #[pallet::getter(fn keys)] + /// Keys of the current authority set. + pub(super) type Keys = StorageValue< + _, + Vec, + ValueQuery, + >; + + #[pallet::storage] + #[pallet::getter(fn next_keys)] + /// Keys of the next authority set. + pub(super) type NextKeys = StorageValue< + _, + Vec, + ValueQuery, + >; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub keys: Vec, } -} -decl_module! { - pub struct Module for enum Call where origin: T::Origin { + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + keys: Default::default(), + } + } + } + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_keys(&self.keys) + } } + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} } -impl Module { +impl Pallet { /// Retrieve authority identifiers of the current and next authority set /// sorted and deduplicated. pub fn authorities() -> Vec { - let mut keys = Keys::get(); - let next = NextKeys::get(); + let mut keys = Keys::::get(); + let next = NextKeys::::get(); keys.extend(next); keys.sort(); @@ -64,28 +106,28 @@ impl Module { /// Retrieve authority identifiers of the current authority set in the original order. pub fn current_authorities() -> Vec { - Keys::get() + Keys::::get() } /// Retrieve authority identifiers of the next authority set in the original order. 
pub fn next_authorities() -> Vec { - NextKeys::get() + NextKeys::::get() } fn initialize_keys(keys: &[AuthorityId]) { if !keys.is_empty() { - assert!(Keys::get().is_empty(), "Keys are already initialized!"); - Keys::put(keys); - NextKeys::put(keys); + assert!(Keys::::get().is_empty(), "Keys are already initialized!"); + Keys::::put(keys); + NextKeys::::put(keys); } } } -impl sp_runtime::BoundToRuntimeAppPublic for Module { +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { type Public = AuthorityId; } -impl OneSessionHandler for Module { +impl OneSessionHandler for Pallet { type Key = AuthorityId; fn on_genesis_session<'a, I: 'a>(authorities: I) @@ -102,9 +144,9 @@ impl OneSessionHandler for Module { // Remember who the authorities are for the new and next session. if changed { let keys = validators.map(|x| x.1); - Keys::put(keys.collect::>()); + Keys::::put(keys.collect::>()); let next_keys = queued_validators.map(|x| x.1); - NextKeys::put(next_keys.collect::>()); + NextKeys::::put(next_keys.collect::>()); } } @@ -113,6 +155,17 @@ impl OneSessionHandler for Module { } } +#[cfg(feature = "std")] +impl GenesisConfig { + /// Direct implementation of `GenesisBuild::assimilate_storage`. + pub fn assimilate_storage( + &self, + storage: &mut sp_runtime::Storage + ) -> Result<(), String> { + >::assimilate_storage(self, storage) + } +} + #[cfg(test)] mod tests { use crate as pallet_authority_discovery; @@ -200,6 +253,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } pub struct TestSessionHandler; @@ -220,7 +274,7 @@ mod tests { #[test] fn authorities_returns_current_and_next_authority_set() { - // The whole authority discovery module ignores account ids, but we still need them for + // The whole authority discovery pallet ignores account ids, but we still need them for // `pallet_session::OneSessionHandler::on_new_session`, thus its safe to use the same value // everywhere. 
let account_id = AuthorityPair::from_seed_slice(vec![10; 32].as_ref()).unwrap().public(); diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 286abc721cbba..a7803319c5396 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -392,6 +392,10 @@ impl ProvideInherent for Module { }, } } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::set_uncles(_)) + } } #[cfg(test)] @@ -447,6 +451,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -485,10 +490,7 @@ mod tests { let pre_runtime_digests = header.digest.logs.iter().filter_map(|d| d.as_pre_runtime()); let seals = header.digest.logs.iter().filter_map(|d| d.as_seal()); - let author = match AuthorGiven::find_author(pre_runtime_digests) { - None => return Err("no author"), - Some(author) => author, - }; + let author = AuthorGiven::find_author(pre_runtime_digests).ok_or_else(|| "no author")?; for (id, seal) in seals { if id == TEST_ID { diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index f7bebce98acf3..64497eafe715c 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -20,7 +20,6 @@ frame-system = { version = "3.0.0", default-features = false, path = "../system" pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } pallet-session = { version = "3.0.0", default-features = false, path = "../session" } pallet-timestamp = { version = "3.0.0", default-features = false, path = "../timestamp" } -serde = { version = "1.0.101", optional = true } sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-babe = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/babe" } sp-consensus-vrf = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/vrf" } @@ -49,7 +48,6 @@ std = [ 
"pallet-authorship/std", "pallet-session/std", "pallet-timestamp/std", - "serde", "sp-application-crypto/std", "sp-consensus-babe/std", "sp-consensus-vrf/std", diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 137f32b5e502c..39831eceb75ba 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -92,6 +92,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl frame_system::offchain::SendTransactionTypes for Test diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 22c4ef0976f5f..116a52151583a 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -30,7 +29,6 @@ pallet-transaction-payment = { version = "3.0.0", path = "../transaction-payment [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 62959c4f1dc4a..f89775146b136 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -44,7 +44,8 @@ benchmarks_instance_pallet! { let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. + // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, + // and reap this user. 
let recipient: T::AccountId = account("recipient", 0, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); @@ -142,6 +143,39 @@ benchmarks_instance_pallet! { assert_eq!(Balances::::free_balance(&source), Zero::zero()); assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } + + // This benchmark performs the same operation as `transfer` in the worst case scenario, + // but additionally introduces many new users into the storage, increasing the the merkle + // trie and PoV size. + #[extra] + transfer_increasing_users { + // 1_000 is not very much, but this upper bound can be controlled by the CLI. + let u in 0 .. 1_000; + let existential_deposit = T::ExistentialDeposit::get(); + let caller = whitelisted_caller(); + + // Give some multiple of the existential deposit + creation fee + transfer fee + let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + + // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, + // and reap this user. + let recipient: T::AccountId = account("recipient", 0, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); + + // Create a bunch of users in storage. + for i in 0 .. u { + // The `account` function uses `blake2_256` to generate unique accounts, so these + // should be quite random and evenly distributed in the trie. 
+ let new_user: T::AccountId = account("new_user", i, SEED); + let _ = as Currency<_>>::make_free_balance_be(&new_user, balance); + } + }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) + verify { + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + } } impl_benchmark_test_suite!( diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index fc4dab7cec4a7..35841c504adf9 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -39,7 +39,7 @@ //! ### Terminology //! //! - **Existential Deposit:** The minimum balance required to create or keep an account open. This prevents -//! "dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) +//! "dust accounts" from filling storage. When the free plus the reserved balance (i.e. the total balance) //! fall below this, then the account is said to be dead; and it loses its functionality as well as any //! prior history and all information on it is removed from the chain's state. //! 
No account should ever have a total balance that is strictly between 0 and the existential @@ -164,7 +164,8 @@ use frame_support::{ Currency, OnUnbalanced, TryDrop, StoredMap, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, BalanceStatus as Status, + ExistenceRequirement::AllowDeath, + tokens::{fungible, DepositConsequence, WithdrawConsequence, BalanceStatus as Status} } }; #[cfg(feature = "std")] @@ -682,6 +683,78 @@ impl, I: 'static> Pallet { } } + fn deposit_consequence( + _who: &T::AccountId, + amount: T::Balance, + account: &AccountData, + ) -> DepositConsequence { + if amount.is_zero() { return DepositConsequence::Success } + + if TotalIssuance::::get().checked_add(&amount).is_none() { + return DepositConsequence::Overflow + } + + let new_total_balance = match account.total().checked_add(&amount) { + Some(x) => x, + None => return DepositConsequence::Overflow, + }; + + if new_total_balance < T::ExistentialDeposit::get() { + return DepositConsequence::BelowMinimum + } + + // NOTE: We assume that we are a provider, so don't need to do any checks in the + // case of account creation. + + DepositConsequence::Success + } + + fn withdraw_consequence( + who: &T::AccountId, + amount: T::Balance, + account: &AccountData, + ) -> WithdrawConsequence { + if amount.is_zero() { return WithdrawConsequence::Success } + + if TotalIssuance::::get().checked_sub(&amount).is_none() { + return WithdrawConsequence::Underflow + } + + let new_total_balance = match account.total().checked_sub(&amount) { + Some(x) => x, + None => return WithdrawConsequence::NoFunds, + }; + + // Provider restriction - total account balance cannot be reduced to zero if it cannot + // sustain the loss of a provider reference. + // NOTE: This assumes that the pallet is a provider (which is true). 
Is this ever changes, + // then this will need to adapt accordingly. + let ed = T::ExistentialDeposit::get(); + let success = if new_total_balance < ed { + if frame_system::Pallet::::can_dec_provider(who) { + WithdrawConsequence::ReducedToZero(new_total_balance) + } else { + return WithdrawConsequence::WouldDie + } + } else { + WithdrawConsequence::Success + }; + + // Enough free funds to have them be reduced. + let new_free_balance = match account.free.checked_sub(&amount) { + Some(b) => b, + None => return WithdrawConsequence::NoFunds, + }; + + // Eventual free funds must be no less than the frozen balance. + let min_balance = account.frozen(Reasons::All); + if new_free_balance < min_balance { + return WithdrawConsequence::Frozen + } + + success + } + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce /// `ExistentialDeposit` law, annulling the account as needed. /// @@ -692,7 +765,7 @@ impl, I: 'static> Pallet { /// the caller will do this. pub fn mutate_account( who: &T::AccountId, - f: impl FnOnce(&mut AccountData) -> R + f: impl FnOnce(&mut AccountData) -> R, ) -> Result { Self::try_mutate_account(who, |a, _| -> Result { Ok(f(a)) }) } @@ -708,7 +781,7 @@ impl, I: 'static> Pallet { /// the caller will do this. fn try_mutate_account>( who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result + f: impl FnOnce(&mut AccountData, bool) -> Result, ) -> Result { Self::try_mutate_account_with_dust(who, f) .map(|(result, dust_cleaner)| { @@ -732,7 +805,7 @@ impl, I: 'static> Pallet { /// the caller will do this. 
fn try_mutate_account_with_dust>( who: &T::AccountId, - f: impl FnOnce(&mut AccountData, bool) -> Result + f: impl FnOnce(&mut AccountData, bool) -> Result, ) -> Result<(R, DustCleaner), E> { let result = T::AccountStore::try_mutate_exists(who, |maybe_account| { let is_new = maybe_account.is_none(); @@ -801,6 +874,192 @@ impl, I: 'static> Pallet { } } } + + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. + fn do_transfer_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: T::Balance, + best_effort: bool, + status: Status, + ) -> Result { + if value.is_zero() { return Ok(Zero::zero()) } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve(slashed, value)), + Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), + }; + } + + let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( + beneficiary, + |to_account, is_new| -> Result<(T::Balance, DustCleaner), DispatchError> { + ensure!(!is_new, Error::::DeadAccount); + Self::try_mutate_account_with_dust( + slashed, + |from_account, _| -> Result { + let actual = cmp::min(from_account.reserved, value); + ensure!(best_effort || actual == value, Error::::InsufficientBalance); + match status { + Status::Free => to_account.free = to_account.free + .checked_add(&actual) + .ok_or(Error::::Overflow)?, + Status::Reserved => to_account.reserved = to_account.reserved + .checked_add(&actual) + .ok_or(Error::::Overflow)?, + } + from_account.reserved -= actual; + Ok(actual) + } + ) + } + )?; + + Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); + Ok(actual) + } +} + +impl, I: 'static> fungible::Inspect for Pallet { + type Balance = T::Balance; + + fn total_issuance() -> 
Self::Balance { + TotalIssuance::::get() + } + fn minimum_balance() -> Self::Balance { + T::ExistentialDeposit::get() + } + fn balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).total() + } + fn reducible_balance(who: &T::AccountId, keep_alive: bool) -> Self::Balance { + let a = Self::account(who); + // Liquid balance is what is neither reserved nor locked/frozen. + let liquid = a.free.saturating_sub(a.fee_frozen.max(a.misc_frozen)); + if frame_system::Pallet::::can_dec_provider(who) && !keep_alive { + liquid + } else { + // `must_remain_to_exist` is the part of liquid balance which must remain to keep total over + // ED. + let must_remain_to_exist = T::ExistentialDeposit::get().saturating_sub(a.total() - liquid); + liquid.saturating_sub(must_remain_to_exist) + } + } + fn can_deposit(who: &T::AccountId, amount: Self::Balance) -> DepositConsequence { + Self::deposit_consequence(who, amount, &Self::account(who)) + } + fn can_withdraw(who: &T::AccountId, amount: Self::Balance) -> WithdrawConsequence { + Self::withdraw_consequence(who, amount, &Self::account(who)) + } +} + +impl, I: 'static> fungible::Mutate for Pallet { + fn mint_into(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { return Ok(()) } + Self::try_mutate_account(who, |account, _is_new| -> DispatchResult { + Self::deposit_consequence(who, amount, &account).into_result()?; + account.free += amount; + Ok(()) + })?; + TotalIssuance::::mutate(|t| *t += amount); + Ok(()) + } + + fn burn_from(who: &T::AccountId, amount: Self::Balance) -> Result { + if amount.is_zero() { return Ok(Self::Balance::zero()); } + let actual = Self::try_mutate_account(who, |account, _is_new| -> Result { + let extra = Self::withdraw_consequence(who, amount, &account).into_result()?; + let actual = amount + extra; + account.free -= actual; + Ok(actual) + })?; + TotalIssuance::::mutate(|t| *t -= actual); + Ok(actual) + } +} + +impl, I: 'static> fungible::Transfer for Pallet { + fn 
transfer( + source: &T::AccountId, + dest: &T::AccountId, + amount: T::Balance, + keep_alive: bool, + ) -> Result { + let er = if keep_alive { KeepAlive } else { AllowDeath }; + >::transfer(source, dest, amount, er) + .map(|_| amount) + } +} + +impl, I: 'static> fungible::Unbalanced for Pallet { + fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + Self::mutate_account(who, |account| account.free = amount)?; + Ok(()) + } + + fn set_total_issuance(amount: Self::Balance) { + TotalIssuance::::mutate(|t| *t = amount); + } +} + +impl, I: 'static> fungible::InspectHold for Pallet { + fn balance_on_hold(who: &T::AccountId) -> T::Balance { + Self::account(who).reserved + } + fn can_hold(who: &T::AccountId, amount: T::Balance) -> bool { + let a = Self::account(who); + let min_balance = T::ExistentialDeposit::get().max(a.frozen(Reasons::All)); + if a.reserved.checked_add(&amount).is_none() { return false } + // We require it to be min_balance + amount to ensure that the full reserved funds may be + // slashed without compromising locked funds or destroying the account. + let required_free = match min_balance.checked_add(&amount) { + Some(x) => x, + None => return false, + }; + a.free >= required_free + } +} +impl, I: 'static> fungible::MutateHold for Pallet { + fn hold(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { + if amount.is_zero() { return Ok(()) } + ensure!(Self::can_reserve(who, amount), Error::::InsufficientBalance); + Self::mutate_account(who, |a| { + a.free -= amount; + a.reserved += amount; + })?; + Ok(()) + } + fn release(who: &T::AccountId, amount: Self::Balance, best_effort: bool) + -> Result + { + if amount.is_zero() { return Ok(amount) } + // Done on a best-effort basis. 
+ Self::try_mutate_account(who, |a, _| { + let new_free = a.free.saturating_add(amount.min(a.reserved)); + let actual = new_free - a.free; + ensure!(best_effort || actual == amount, Error::::InsufficientBalance); + // ^^^ Guaranteed to be <= amount and <= a.reserved + a.free = new_free; + a.reserved = a.reserved.saturating_sub(actual.clone()); + Ok(actual) + }) + } + fn transfer_held( + source: &T::AccountId, + dest: &T::AccountId, + amount: Self::Balance, + best_effort: bool, + on_hold: bool, + ) -> Result { + let status = if on_hold { Status::Reserved } else { Status::Free }; + Self::do_transfer_reserved(source, dest, amount, best_effort, status) + } } // wrapping these imbalances in a private module is necessary to ensure absolute privacy @@ -811,6 +1070,7 @@ mod imbalances { TryDrop, RuntimeDebug, }; use sp_std::mem; + use frame_support::traits::SameOrOther; /// Opaque, move-only struct with private fields that serves as a token denoting that /// funds have been created without any equal and opposite accounting. 
@@ -844,6 +1104,12 @@ mod imbalances { } } + impl, I: 'static> Default for PositiveImbalance { + fn default() -> Self { + Self::zero() + } + } + impl, I: 'static> Imbalance for PositiveImbalance { type Opposite = NegativeImbalance; @@ -874,14 +1140,16 @@ mod imbalances { self.0 = self.0.saturating_add(other.0); mem::forget(other); } - fn offset(self, other: Self::Opposite) -> result::Result { + fn offset(self, other: Self::Opposite) -> SameOrOther { let (a, b) = (self.0, other.0); mem::forget((self, other)); - if a >= b { - Ok(Self(a - b)) + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(NegativeImbalance::new(b - a)) } else { - Err(NegativeImbalance::new(b - a)) + SameOrOther::None } } fn peek(&self) -> T::Balance { @@ -895,6 +1163,12 @@ mod imbalances { } } + impl, I: 'static> Default for NegativeImbalance { + fn default() -> Self { + Self::zero() + } + } + impl, I: 'static> Imbalance for NegativeImbalance { type Opposite = PositiveImbalance; @@ -925,14 +1199,16 @@ mod imbalances { self.0 = self.0.saturating_add(other.0); mem::forget(other); } - fn offset(self, other: Self::Opposite) -> result::Result { + fn offset(self, other: Self::Opposite) -> SameOrOther { let (a, b) = (self.0, other.0); mem::forget((self, other)); - if a >= b { - Ok(Self(a - b)) + if a > b { + SameOrOther::Same(Self(a - b)) + } else if b > a { + SameOrOther::Other(PositiveImbalance::new(b - a)) } else { - Err(PositiveImbalance::new(b - a)) + SameOrOther::None } } fn peek(&self) -> T::Balance { @@ -1363,40 +1639,8 @@ impl, I: 'static> ReservableCurrency for Pallet value: Self::Balance, status: Status, ) -> Result { - if value.is_zero() { return Ok(Zero::zero()) } - - if slashed == beneficiary { - return match status { - Status::Free => Ok(Self::unreserve(slashed, value)), - Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - }; - } - - let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( - 
beneficiary, - |to_account, is_new| -> Result<(Self::Balance, DustCleaner), DispatchError> { - ensure!(!is_new, Error::::DeadAccount); - Self::try_mutate_account_with_dust( - slashed, - |from_account, _| -> Result { - let actual = cmp::min(from_account.reserved, value); - match status { - Status::Free => to_account.free = to_account.free - .checked_add(&actual) - .ok_or(Error::::Overflow)?, - Status::Reserved => to_account.reserved = to_account.reserved - .checked_add(&actual) - .ok_or(Error::::Overflow)?, - } - from_account.reserved -= actual; - Ok(actual) - } - ) - } - )?; - - Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); - Ok(value - actual) + let actual = Self::do_transfer_reserved(slashed, beneficiary, value, true, status)?; + Ok(value.saturating_sub(actual)) } } diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 3eb70e401e7f8..de12c39ededf5 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -169,13 +169,13 @@ macro_rules! decl_tests { &info_from_weight(1), 1, ).is_err()); - assert!( as SignedExtension>::pre_dispatch( + assert_ok!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, &info_from_weight(1), 1, - ).is_ok()); + )); Balances::set_lock(ID_1, &1, 10, WithdrawReasons::TRANSACTION_PAYMENT); assert_ok!(>::transfer(&1, &2, 1, AllowDeath)); @@ -394,7 +394,7 @@ macro_rules! decl_tests { fn refunding_balance_should_work() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); - assert!(Balances::mutate_account(&1, |a| a.reserved = 69).is_ok()); + assert_ok!(Balances::mutate_account(&1, |a| a.reserved = 69)); Balances::unreserve(&1, 69); assert_eq!(Balances::free_balance(1), 111); assert_eq!(Balances::reserved_balance(1), 0); @@ -669,7 +669,9 @@ macro_rules! 
decl_tests { assert_eq!(Balances::reserved_balance(1), 50); // Reserve some free balance - let _ = Balances::slash(&1, 1); + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); + // The account should be dead. assert_eq!(Balances::free_balance(1), 0); assert_eq!(Balances::reserved_balance(1), 0); @@ -727,7 +729,8 @@ macro_rules! decl_tests { ] ); - let _ = Balances::slash(&1, 1); + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); assert_eq!( events(), @@ -756,7 +759,8 @@ macro_rules! decl_tests { ] ); - let _ = Balances::slash(&1, 100); + let res = Balances::slash(&1, 100); + assert_eq!(res, (NegativeImbalance::new(100), 0)); assert_eq!( events(), diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 90bcaf1a480ad..b4bdb13fbb838 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -75,6 +75,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const TransactionByteFee: u64 = 1; diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 10ea74d8887bc..ac5adfd8d1f3d 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -77,6 +77,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! 
{ pub const TransactionByteFee: u64 = 1; @@ -174,12 +175,14 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { ] ); - let _ = Balances::slash(&1, 98); + let res = Balances::slash(&1, 98); + assert_eq!(res, (NegativeImbalance::new(98), 0)); // no events assert_eq!(events(), []); - let _ = Balances::slash(&1, 1); + let res = Balances::slash(&1, 1); + assert_eq!(res, (NegativeImbalance::new(1), 0)); assert_eq!( events(), diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 547c7dd7cfb72..3d6a90929aeee 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -90,6 +90,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const TransactionByteFee: u64 = 1; @@ -104,7 +105,7 @@ impl pallet_transaction_payment::Config for Test { pub struct OnDustRemoval; impl OnUnbalanced> for OnDustRemoval { fn on_nonzero_unbalanced(amount: NegativeImbalance) { - let _ = Balances::resolve_into_existing(&1, amount); + assert_ok!(Balances::resolve_into_existing(&1, amount)); } } parameter_types! { diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 463ac7dd35c07..5f3cf2b6bd9a9 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_balances //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2021-01-06, STEPS: \[50, \], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-04-08, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -55,27 +55,27 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn transfer() -> Weight { - (100_698_000 as Weight) + (81_909_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (69_407_000 as Weight) + (61_075_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (38_489_000 as Weight) + (32_255_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (48_458_000 as Weight) + (38_513_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (99_320_000 as Weight) + (80_448_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -84,27 +84,27 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn transfer() -> Weight { - (100_698_000 as Weight) + (81_909_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn transfer_keep_alive() -> Weight { - (69_407_000 as Weight) + (61_075_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_balance_creating() -> Weight { - (38_489_000 as Weight) + (32_255_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_balance_killing() -> Weight { - (48_458_000 as 
Weight) + (38_513_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_transfer() -> Weight { - (99_320_000 as Weight) + (80_448_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index a9657fd7b11a2..7b6d8838fd21c 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -38,6 +38,7 @@ pub enum BenchmarkSelector { StorageRootTime, Reads, Writes, + ProofSize, } #[derive(Debug)] @@ -86,6 +87,7 @@ impl Analysis { BenchmarkSelector::StorageRootTime => result.storage_root_time, BenchmarkSelector::Reads => result.reads.into(), BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), } ).collect(); @@ -126,6 +128,7 @@ impl Analysis { BenchmarkSelector::StorageRootTime => result.storage_root_time, BenchmarkSelector::Reads => result.reads.into(), BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), }; (result.components[i].1, data) }) @@ -190,6 +193,7 @@ impl Analysis { BenchmarkSelector::StorageRootTime => result.storage_root_time, BenchmarkSelector::Reads => result.reads.into(), BenchmarkSelector::Writes => result.writes.into(), + BenchmarkSelector::ProofSize => result.proof_size.into(), }) } @@ -370,6 +374,7 @@ mod tests { repeat_reads: 0, writes, repeat_writes: 0, + proof_size: 0, } } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index b134e79ca2450..63f65db366651 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -42,6 +42,16 @@ pub use sp_storage::TrackedStorageKey; #[doc(hidden)] pub use log; +/// Whitelist the given account. +#[macro_export] +macro_rules! 
whitelist { + ($acc:ident) => { + frame_benchmarking::benchmarking::add_to_whitelist( + frame_system::Account::::hashed_key_for(&$acc).into() + ); + }; +} + /// Construct pallet benchmarks for weighing dispatchables. /// /// Works around the idea of complexity parameters, named by a single letter (which is usually @@ -764,12 +774,21 @@ macro_rules! impl_benchmark { "Start Benchmark: {:?}", c ); + let start_pov = $crate::benchmarking::proof_size(); let start_extrinsic = $crate::benchmarking::current_time(); closure_to_benchmark()?; let finish_extrinsic = $crate::benchmarking::current_time(); - let elapsed_extrinsic = finish_extrinsic - start_extrinsic; + let end_pov = $crate::benchmarking::proof_size(); + + // Calculate the diff caused by the benchmark. + let elapsed_extrinsic = finish_extrinsic.saturating_sub(start_extrinsic); + let diff_pov = match (start_pov, end_pov) { + (Some(start), Some(end)) => end.saturating_sub(start), + _ => Default::default(), + }; + // Commit the changes to get proper write count $crate::benchmarking::commit_db(); $crate::log::trace!( @@ -796,6 +815,7 @@ macro_rules! 
impl_benchmark { repeat_reads: read_write_count.1, writes: read_write_count.2, repeat_writes: read_write_count.3, + proof_size: diff_pov, }); } diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index ac0a208543058..0869ae68c7e09 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -104,6 +104,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types!{ diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 1574e47454b59..2db7b2e95d9d4 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -62,6 +62,7 @@ pub struct BenchmarkResults { pub repeat_reads: u32, pub writes: u32, pub repeat_writes: u32, + pub proof_size: u32, } /// Configuration used to setup and run runtime benchmarks. @@ -162,6 +163,11 @@ pub trait Benchmarking { whitelist.retain(|x| x.key != remove); self.set_whitelist(whitelist); } + + /// Get current estimated proof size. + fn proof_size(&self) -> Option { + self.proof_size() + } } /// The pallet benchmarking trait. 
diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index ff1a3a6807098..1845f77e97a9a 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -32,7 +31,6 @@ pallet-balances = { version = "3.0.0", path = "../balances" } [features] default = ["std"] std = [ - "serde", "codec/std", "sp-std/std", "sp-runtime/std", diff --git a/frame/bounties/src/lib.rs b/frame/bounties/src/lib.rs index dafa7cd61d054..419713ab5eff5 100644 --- a/frame/bounties/src/lib.rs +++ b/frame/bounties/src/lib.rs @@ -677,14 +677,14 @@ impl Module { /// This actually does computation. If you need to keep using it, then make sure you cache the /// value and only call this once. 
pub fn account_id() -> T::AccountId { - T::ModuleId::get().into_account() + T::PalletId::get().into_account() } /// The account ID of a bounty account pub fn bounty_account_id(id: BountyIndex) -> T::AccountId { // only use two byte prefix to support 16 byte account id (used by test) // "modl" ++ "py/trsry" ++ "bt" is 14 bytes, and two bytes remaining for bounty index - T::ModuleId::get().into_sub_account(("bt", id)) + T::PalletId::get().into_sub_account(("bt", id)) } fn create_bounty( diff --git a/frame/bounties/src/tests.rs b/frame/bounties/src/tests.rs index 617f186975269..e90b1f565a4c9 100644 --- a/frame/bounties/src/tests.rs +++ b/frame/bounties/src/tests.rs @@ -24,12 +24,13 @@ use super::*; use std::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, parameter_types, weights::Weight, traits::OnInitialize + assert_noop, assert_ok, parameter_types, weights::Weight, traits::OnInitialize, + PalletId }; use sp_core::H256; use sp_runtime::{ - Perbill, ModuleId, + Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, }; @@ -80,6 +81,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -102,11 +104,12 @@ parameter_types! 
{ pub const SpendPeriod: u64 = 2; pub const Burn: Permill = Permill::from_percent(50); pub const DataDepositPerByte: u64 = 1; - pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); + pub const TreasuryPalletId: PalletId = PalletId(*b"py/trsry"); + pub const MaxApprovals: u32 = 100; } // impl pallet_treasury::Config for Test { impl pallet_treasury::Config for Test { - type ModuleId = TreasuryModuleId; + type PalletId = TreasuryPalletId; type Currency = pallet_balances::Pallet; type ApproveOrigin = frame_system::EnsureRoot; type RejectOrigin = frame_system::EnsureRoot; @@ -119,6 +122,7 @@ impl pallet_treasury::Config for Test { type BurnDestination = (); // Just gets burned. type WeightInfo = (); type SpendFunds = Bounties; + type MaxApprovals = MaxApprovals; } parameter_types! { pub const BountyDepositBase: u64 = 80; @@ -311,7 +315,7 @@ fn pot_underflow_should_not_diminish() { >::on_initialize(2); assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); + assert_ok!(Balances::deposit_into_existing(&Treasury::account_id(), 100)); >::on_initialize(4); assert_eq!(Balances::free_balance(3), 150); // Fund has been spent assert_eq!(Treasury::pot(), 25); // Pot has finally changed @@ -688,7 +692,8 @@ fn claim_handles_high_fee() { >::on_initialize(5); // make fee > balance - let _ = Balances::slash(&Bounties::bounty_account_id(0), 10); + let res = Balances::slash(&Bounties::bounty_account_id(0), 10); + assert_eq!(res.0.peek(), 10); assert_ok!(Bounties::claim_bounty(Origin::signed(1), 0)); diff --git a/frame/chainbridge/Cargo.toml b/frame/chainbridge/Cargo.toml index 6ad966b2a8ee1..738975837d3e2 100644 --- a/frame/chainbridge/Cargo.toml +++ b/frame/chainbridge/Cargo.toml @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = 
"../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -serde = { version = "1.0.101", optional = true } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -30,7 +29,6 @@ frame-benchmarking = { version = "3.0.0", default-features = false, path = "../b [features] default = ["std"] std = [ - "serde", "codec/std", "sp-runtime/std", "frame-benchmarking/std", diff --git a/frame/chainbridge/src/lib.rs b/frame/chainbridge/src/lib.rs index 5830fabcf1770..8616b54050a4f 100644 --- a/frame/chainbridge/src/lib.rs +++ b/frame/chainbridge/src/lib.rs @@ -7,7 +7,7 @@ use frame_support::{ weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays, GetDispatchInfo}, ensure, traits::{EnsureOrigin, Get}, - Parameter, + Parameter, PalletId }; use sp_std::prelude::*; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -20,14 +20,14 @@ use sp_runtime::{ transaction_validity::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, }, - ModuleId, RuntimeDebug, + RuntimeDebug, }; mod mock; mod tests; const DEFAULT_RELAYER_THRESHOLD: u32 = 1; -const MODULE_ID: ModuleId = ModuleId(*b"cb/bridg"); +const MODULE_ID: PalletId = PalletId(*b"cb/bridg"); pub type ChainId = u8; pub type DepositNonce = u64; diff --git a/frame/chainbridge/src/mock.rs b/frame/chainbridge/src/mock.rs index 5eebf34441074..1565c32fdcba7 100644 --- a/frame/chainbridge/src/mock.rs +++ b/frame/chainbridge/src/mock.rs @@ -2,7 +2,7 @@ use super::*; -use frame_support::{assert_ok, ord_parameter_types, parameter_types, weights::Weight}; +use frame_support::{assert_ok, ord_parameter_types, parameter_types, weights::Weight, PalletId}; use frame_system::{self as system}; use sp_core::H256; use sp_runtime::{ 
@@ -47,6 +47,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -103,7 +104,7 @@ pub const ENDOWED_BALANCE: u64 = 100_000_000; pub const TEST_THRESHOLD: u32 = 2; pub fn new_test_ext() -> sp_io::TestExternalities { - let bridge_id = ModuleId(*b"cb/bridg").into_account(); + let bridge_id = PalletId(*b"cb/bridg").into_account(); let mut t = frame_system::GenesisConfig::default() .build_storage::() .unwrap(); diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index b8f825cc52931..c4940c87f827b 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } @@ -34,7 +33,6 @@ std = [ "codec/std", "sp-core/std", "sp-std/std", - "serde", "sp-io/std", "frame-support/std", "sp-runtime/std", diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 28c2ff77b81fe..6284617e89bd9 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -48,14 +48,13 @@ use sp_io::storage; use sp_runtime::{RuntimeDebug, traits::Hash}; use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, ensure, BoundedVec, codec::{Decode, Encode}, - decl_error, decl_event, decl_module, decl_storage, dispatch::{ DispatchError, DispatchResult, DispatchResultWithPostInfo, Dispatchable, Parameter, PostDispatchInfo, }, - ensure, - traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers}, + traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers, GetBacking, Backing}, 
weights::{DispatchClass, GetDispatchInfo, Weight, Pays}, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -165,6 +164,15 @@ pub enum RawOrigin { _Phantom(sp_std::marker::PhantomData), } +impl GetBacking for RawOrigin { + fn get_backing(&self) -> Option { + match self { + RawOrigin::Members(n, d) => Some(Backing { approvals: *n, eligible: *d }), + _ => None, + } + } +} + /// Origin for the collective module. pub type Origin = RawOrigin<::AccountId, I>; @@ -186,7 +194,7 @@ pub struct Votes { decl_storage! { trait Store for Module, I: Instance=DefaultInstance> as Collective { /// The hashes of the active proposals. - pub Proposals get(fn proposals): Vec; + pub Proposals get(fn proposals): BoundedVec; /// Actual proposal for a given hash, if it's current. pub ProposalOf get(fn proposal_of): map hasher(identity) T::Hash => Option<>::Proposal>; @@ -462,11 +470,7 @@ decl_module! { } else { let active_proposals = >::try_mutate(|proposals| -> Result { - proposals.push(proposal_hash); - ensure!( - proposals.len() <= T::MaxProposals::get() as usize, - Error::::TooManyProposals - ); + proposals.try_push(proposal_hash).map_err(|_| Error::::TooManyProposals)?; Ok(proposals.len()) })?; let index = Self::proposal_count(); @@ -1004,6 +1008,7 @@ mod tests { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl Config for Test { type Origin = Origin; @@ -1076,7 +1081,7 @@ mod tests { fn motions_basic_environment_works() { new_test_ext().execute_with(|| { assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(Collective::proposals(), Vec::::new()); + assert_eq!(*Collective::proposals(), Vec::::new()); }); } @@ -1306,7 +1311,7 @@ mod tests { let hash = proposal.blake2_256().into(); let end = 4; assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); - assert_eq!(Collective::proposals(), vec![hash]); + assert_eq!(*Collective::proposals(), vec![hash]); 
assert_eq!(Collective::proposal_of(&hash), Some(proposal)); assert_eq!( Collective::voting(&hash), @@ -1567,9 +1572,9 @@ mod tests { assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()), proposal_len)); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); assert_ok!(Collective::close(Origin::signed(2), hash.clone(), 0, proposal_weight, proposal_len)); - assert_eq!(Collective::proposals(), vec![]); + assert_eq!(*Collective::proposals(), vec![]); assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()), proposal_len)); - assert_eq!(Collective::proposals(), vec![hash]); + assert_eq!(*Collective::proposals(), vec![hash]); }); } diff --git a/frame/contracts/CHANGELOG.md b/frame/contracts/CHANGELOG.md index efc3eb93c5701..9660d903bfe8d 100644 --- a/frame/contracts/CHANGELOG.md +++ b/frame/contracts/CHANGELOG.md @@ -20,6 +20,8 @@ In other words: Upgrading this pallet will not break pre-existing contracts. ### Added +- Add new `instantiate` RPC that allows clients to dry-run contract instantiation. + - Make storage and fields of `Schedule` private to the crate. 
[1](https://github.com/paritytech/substrate/pull/8359) diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 018a8a5df672e..ba8069604a77b 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -14,35 +14,39 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +log = { version = "0.4", default-features = false } +parity-wasm = { version = "0.42", default-features = false } +pwasm-utils = { version = "0.17", default-features = false } +serde = { version = "1", optional = true, features = ["derive"] } +wasmi-validation = { version = "0.4", default-features = false } + +# Only used in benchmarking to generate random contract code +rand = { version = "0.8", optional = true, default-features = false } +rand_pcg = { version = "0.3", optional = true } + +# Substrate Dependencies frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-contracts-primitives = { version = "3.0.0", default-features = false, path = "common" } pallet-contracts-proc-macro = { version = "3.0.0", path = "proc-macro" } -parity-wasm = { version = "0.41.0", default-features = false } -pwasm-utils = { version = "0.16", default-features = false } -serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features 
= false, path = "../../primitives/runtime" } sp-sandbox = { version = "0.9.0", default-features = false, path = "../../primitives/sandbox" } -wasmi-validation = { version = "0.3.0", default-features = false } -log = { version = "0.4.14", default-features = false } - -# Only used in benchmarking to generate random contract code -rand = { version = "0.7.0", optional = true, default-features = false } -rand_pcg = { version = "0.2.1", optional = true } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } [dev-dependencies] -assert_matches = "1.3.0" -hex-literal = "0.3.1" +assert_matches = "1" +hex-literal = "0.3" +paste = "1" +pretty_assertions = "0.7" +wat = "1" + +# Substrate Dependencies pallet-balances = { version = "3.0.0", path = "../balances" } pallet-timestamp = { version = "3.0.0", path = "../timestamp" } pallet-randomness-collective-flip = { version = "3.0.0", path = "../randomness-collective-flip" } -paste = "1.0" -pretty_assertions = "0.6.1" -wat = "1.0" [features] default = ["std"] diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 050e18fc44d14..154ceeb891344 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -13,9 +13,12 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -# This crate should not rely on any of the frame primitives. 
bitflags = "1.0" -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "2", default-features = false, features = ["derive"] } +serde = { version = "1", features = ["derive"], optional = true } + +# Substrate Dependencies (This crate should not rely on frame) +sp-core = { version = "3.0.0", path = "../../../primitives/core", default-features = false } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } @@ -23,6 +26,8 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../../pri default = ["std"] std = [ "codec/std", + "sp-core/std", "sp-runtime/std", "sp-std/std", + "serde", ] diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/common/src/lib.rs index 2b325d63d628d..17d4bec06b7cf 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/common/src/lib.rs @@ -21,18 +21,45 @@ use bitflags::bitflags; use codec::{Decode, Encode}; +use sp_core::Bytes; use sp_runtime::{DispatchError, RuntimeDebug}; use sp_std::prelude::*; -/// Result type of a `bare_call` call. +#[cfg(feature = "std")] +use serde::{Serialize, Deserialize}; + +/// Result type of a `bare_call` or `bare_instantiate` call. /// -/// The result of a contract execution along with a gas consumed. +/// It contains the execution result together with some auxiliary information. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] -pub struct ContractExecResult { - pub exec_result: ExecResult, +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct ContractResult { + /// How much gas was consumed during execution. pub gas_consumed: u64, + /// An optional debug message. 
This message is only non-empty when explicitly requested + /// by the code that calls into the contract. + /// + /// The contained bytes are valid UTF-8. This is not declared as `String` because + /// this type is not allowed within the runtime. A client should decode them in order + /// to present the message to its users. + /// + /// # Note + /// + /// The debug message is never generated during on-chain execution. It is reserved for + /// RPC calls. + pub debug_message: Bytes, + /// The execution result of the wasm code. + pub result: T, } +/// Result type of a `bare_call` call. +pub type ContractExecResult = ContractResult>; + +/// Result type of a `bare_instantiate` call. +pub type ContractInstantiateResult = + ContractResult, DispatchError>>; + /// Result type of a `get_storage` call. pub type GetStorageResult = Result>, ContractAccessError>; @@ -50,6 +77,8 @@ pub enum ContractAccessError { } #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub enum RentProjection { /// Eviction is projected to happen at the specified block number. EvictionAt(BlockNumber), @@ -62,6 +91,8 @@ pub enum RentProjection { bitflags! { /// Flags used by a contract to customize exit behaviour. #[derive(Encode, Decode)] + #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[cfg_attr(feature = "std", serde(rename_all = "camelCase", transparent))] pub struct ReturnFlags: u32 { /// If this bit is set all changes made by the contract execution are rolled back. const REVERT = 0x0000_0001; @@ -70,11 +101,13 @@ bitflags! { /// Output of a contract call or instantiation which ran to completion. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct ExecReturnValue { /// Flags passed along by `seal_return`. 
Empty when `seal_return` was never called. pub flags: ReturnFlags, /// Buffer passed along by `seal_return`. Empty when `seal_return` was never called. - pub data: Vec, + pub data: Bytes, } impl ExecReturnValue { @@ -84,40 +117,32 @@ impl ExecReturnValue { } } -/// Origin of the error. -/// -/// Call or instantiate both called into other contracts and pass through errors happening -/// in those to the caller. This enum is for the caller to distinguish whether the error -/// happened during the execution of the callee or in the current execution context. +/// The result of a successful contract instantiation. #[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -pub enum ErrorOrigin { - /// Caller error origin. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub struct InstantiateReturnValue { + /// The output of the called constructor. + pub result: ExecReturnValue, + /// The account id of the new contract. + pub account_id: AccountId, + /// Information about when and if the new project will be evicted. /// - /// The error happened in the current exeuction context rather than in the one - /// of the contract that is called into. - Caller, - /// The error happened during execution of the called contract. - Callee, -} - -/// Error returned by contract exection. -#[derive(PartialEq, Eq, Encode, Decode, RuntimeDebug)] -pub struct ExecError { - /// The reason why the execution failed. - pub error: DispatchError, - /// Origin of the error. - pub origin: ErrorOrigin, + /// # Note + /// + /// `None` if `bare_instantiate` was called with + /// `compute_projection` set to false. From the perspective of an RPC this means that + /// the runtime API did not request this value and this feature is therefore unsupported. 
+ pub rent_projection: Option>, } -impl> From for ExecError { - fn from(error: T) -> Self { - Self { - error: error.into(), - origin: ErrorOrigin::Caller, - } - } +/// Reference to an existing code hash or a new wasm module. +#[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] +pub enum Code { + /// A wasm module as raw bytes. + Upload(Bytes), + /// The code hash of an on-chain wasm blob. + Existing(Hash), } - -/// The result that is returned from contract execution. It either contains the output -/// buffer or an error describing the reason for failure. -pub type ExecResult = Result; diff --git a/frame/contracts/proc-macro/src/lib.rs b/frame/contracts/proc-macro/src/lib.rs index 6fc2fbe82e037..3b8b1ea5e6636 100644 --- a/frame/contracts/proc-macro/src/lib.rs +++ b/frame/contracts/proc-macro/src/lib.rs @@ -117,17 +117,17 @@ fn format_weight(field: &Ident) -> TokenStream { &if self.#field > 1_000_000_000 { format!( "{:.1?} ms", - Fixed::saturating_from_rational(self.#field, 1_000_000_000).to_fraction() + Fixed::saturating_from_rational(self.#field, 1_000_000_000).to_float() ) } else if self.#field > 1_000_000 { format!( "{:.1?} µs", - Fixed::saturating_from_rational(self.#field, 1_000_000).to_fraction() + Fixed::saturating_from_rational(self.#field, 1_000_000).to_float() ) } else if self.#field > 1_000 { format!( "{:.1?} ns", - Fixed::saturating_from_rational(self.#field, 1_000).to_fraction() + Fixed::saturating_from_rational(self.#field, 1_000).to_float() ) } else { format!("{} ps", self.#field) diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index d0068e3e421c9..dbd4356acc4a9 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -13,18 +13,20 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0" } 
-jsonrpc-core = "15.1.0" -jsonrpc-core-client = "15.1.0" -jsonrpc-derive = "15.1.0" +codec = { package = "parity-scale-codec", version = "2" } +jsonrpc-core = "15" +jsonrpc-core-client = "15" +jsonrpc-derive = "15" +serde = { version = "1", features = ["derive"] } + +# Substrate Dependencies +pallet-contracts-primitives = { version = "3.0.0", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "3.0.0", path = "./runtime-api" } +sp-api = { version = "3.0.0", path = "../../../primitives/api" } sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-rpc = { version = "3.0.0", path = "../../../primitives/rpc" } -serde = { version = "1.0.101", features = ["derive"] } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "3.0.0", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "3.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "3.0.0", path = "./runtime-api" } [dev-dependencies] -serde_json = "1.0.41" +serde_json = "1" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 32de637f10822..8ce1c13e667eb 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -13,11 +13,13 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { package = "parity-scale-codec", version = "2", default-features = false, features = ["derive"] } + +# Substrate Dependencies +pallet-contracts-primitives = { version = "3.0.0", default-features = false, path = "../../common" } sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "3.0.0", default-features = false, path = 
"../../../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = "3.0.0", default-features = false, path = "../../common" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../../primitives/std" } [features] default = ["std"] diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs index 6f0399586fa22..943931ec0c846 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/rpc/runtime-api/src/lib.rs @@ -25,18 +25,21 @@ use codec::Codec; use sp_std::vec::Vec; -use pallet_contracts_primitives::{ContractExecResult, GetStorageResult, RentProjectionResult}; +use pallet_contracts_primitives::{ + ContractExecResult, GetStorageResult, RentProjectionResult, Code, ContractInstantiateResult, +}; sp_api::decl_runtime_apis! { /// The API to interact with contracts without using executive. - pub trait ContractsApi where + pub trait ContractsApi where AccountId: Codec, Balance: Codec, BlockNumber: Codec, + Hash: Codec, { /// Perform a call from a specified account to a given contract. /// - /// See the contracts' `call` dispatchable function for more details. + /// See [`pallet_contracts::Pallet::call`]. fn call( origin: AccountId, dest: AccountId, @@ -45,6 +48,18 @@ sp_api::decl_runtime_apis! { input_data: Vec, ) -> ContractExecResult; + /// Instantiate a new contract. + /// + /// See [`pallet_contracts::Pallet::instantiate`]. + fn instantiate( + origin: AccountId, + endowment: Balance, + gas_limit: u64, + code: Code, + data: Vec, + salt: Vec, + ) -> ContractInstantiateResult; + /// Query a given storage key in a given contract. 
/// /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index e0a056906f743..dd9ec164a984b 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -27,14 +27,13 @@ use serde::{Deserialize, Serialize}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; use sp_core::{Bytes, H256}; -use sp_rpc::number; +use sp_rpc::number::NumberOrHex; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, - DispatchError, }; use std::convert::{TryFrom, TryInto}; -use pallet_contracts_primitives::ContractExecResult; +use pallet_contracts_primitives::{Code, ContractExecResult, ContractInstantiateResult}; pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; @@ -42,6 +41,8 @@ const RUNTIME_ERROR: i64 = 1; const CONTRACT_DOESNT_EXIST: i64 = 2; const CONTRACT_IS_A_TOMBSTONE: i64 = 3; +pub type Weight = u64; + /// A rough estimate of how much gas a decent hardware consumes per second, /// using native execution. /// This value is used to set the upper bound for maximal contract calls to @@ -50,7 +51,11 @@ const CONTRACT_IS_A_TOMBSTONE: i64 = 3; /// As 1 gas is equal to 1 weight we base this on the conducted benchmarks which /// determined runtime weights: /// https://github.com/paritytech/substrate/pull/5446 -const GAS_PER_SECOND: u64 = 1_000_000_000_000; +const GAS_PER_SECOND: Weight = 1_000_000_000_000; + +/// The maximum amount of weight that the call and instantiate rpcs are allowed to consume. +/// This puts a ceiling on the weight limit that is supplied to the rpc as an argument. +const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; /// A private newtype for converting `ContractAccessError` into an RPC error. 
struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); @@ -79,59 +84,27 @@ impl From for Error { pub struct CallRequest { origin: AccountId, dest: AccountId, - value: number::NumberOrHex, - gas_limit: number::NumberOrHex, + value: NumberOrHex, + gas_limit: NumberOrHex, input_data: Bytes, } +/// A struct that encodes RPC parameters required to instantiate a new smart-contract. #[derive(Serialize, Deserialize)] -#[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] -struct RpcContractExecSuccess { - /// The return flags. See `pallet_contracts_primitives::ReturnFlags`. - flags: u32, - /// Data as returned by the contract. - data: Bytes, -} - -/// An RPC serializable result of contract execution -#[derive(Serialize, Deserialize)] #[serde(deny_unknown_fields)] -#[serde(rename_all = "camelCase")] -pub struct RpcContractExecResult { - /// How much gas was consumed by the call. In case of an error this is the amount - /// that was used up until the error occurred. - gas_consumed: u64, - /// Additional dynamic human readable error information for debugging. An empty string - /// indicates that no additional information is available. - debug_message: String, - /// Indicates whether the contract execution was successful or not. - result: std::result::Result, -} - -impl From for RpcContractExecResult { - fn from(r: ContractExecResult) -> Self { - match r.exec_result { - Ok(val) => RpcContractExecResult { - gas_consumed: r.gas_consumed, - debug_message: String::new(), - result: Ok(RpcContractExecSuccess { - flags: val.flags.bits(), - data: val.data.into(), - }), - }, - Err(err) => RpcContractExecResult { - gas_consumed: r.gas_consumed, - debug_message: String::new(), - result: Err(err.error), - }, - } - } +pub struct InstantiateRequest { + origin: AccountId, + endowment: NumberOrHex, + gas_limit: NumberOrHex, + code: Code, + data: Bytes, + salt: Bytes, } /// Contracts RPC methods. 
#[rpc] -pub trait ContractsApi { +pub trait ContractsApi { /// Executes a call to a contract. /// /// This call is performed locally without submitting any transactions. Thus executing this @@ -143,7 +116,20 @@ pub trait ContractsApi { &self, call_request: CallRequest, at: Option, - ) -> Result; + ) -> Result; + + /// Instantiate a new contract. + /// + /// This call is performed locally without submitting any transactions. Thus the contract + /// is not actually created. + /// + /// This method is useful for UIs to dry-run contract instantiations. + #[rpc(name = "contracts_instantiate")] + fn instantiate( + &self, + instantiate_request: InstantiateRequest, + at: Option, + ) -> Result>; /// Returns the value under a specified storage `key` in a contract given by `address` param, /// or `None` if it is not set. @@ -184,12 +170,13 @@ impl Contracts { } } } -impl +impl ContractsApi< ::Hash, <::Header as HeaderT>::Number, AccountId, Balance, + Hash, > for Contracts where Block: BlockT, @@ -199,15 +186,17 @@ where AccountId, Balance, <::Header as HeaderT>::Number, + Hash, >, AccountId: Codec, - Balance: Codec + TryFrom, + Balance: Codec + TryFrom, + Hash: Codec, { fn call( &self, call_request: CallRequest, at: Option<::Hash>, - ) -> Result { + ) -> Result { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. @@ -221,37 +210,45 @@ where input_data, } = call_request; - // Make sure that value fits into the balance type. - let value: Balance = value.try_into().map_err(|_| Error { - code: ErrorCode::InvalidParams, - message: format!("{:?} doesn't fit into the balance type", value), - data: None, - })?; - - // Make sure that gas_limit fits into 64 bits. 
- let gas_limit: u64 = gas_limit.try_into().map_err(|_| Error { - code: ErrorCode::InvalidParams, - message: format!("{:?} doesn't fit in 64 bit unsigned value", gas_limit), - data: None, - })?; - - let max_gas_limit = 5 * GAS_PER_SECOND; - if gas_limit > max_gas_limit { - return Err(Error { - code: ErrorCode::InvalidParams, - message: format!( - "Requested gas limit is greater than maximum allowed: {} > {}", - gas_limit, max_gas_limit - ), - data: None, - }); - } + let value: Balance = decode_hex(value, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + limit_gas(gas_limit)?; let exec_result = api .call(&at, origin, dest, value, gas_limit, input_data.to_vec()) .map_err(runtime_error_into_rpc_err)?; - Ok(exec_result.into()) + Ok(exec_result) + } + + fn instantiate( + &self, + instantiate_request: InstantiateRequest, + at: Option<::Hash>, + ) -> Result::Header as HeaderT>::Number>> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. 
+ self.client.info().best_hash)); + + let InstantiateRequest { + origin, + endowment, + gas_limit, + code, + data, + salt, + } = instantiate_request; + + let endowment: Balance = decode_hex(endowment, "balance")?; + let gas_limit: Weight = decode_hex(gas_limit, "weight")?; + limit_gas(gas_limit)?; + + let exec_result = api + .instantiate(&at, origin, endowment, gas_limit, code, data.to_vec(), salt.to_vec()) + .map_err(runtime_error_into_rpc_err)?; + + Ok(exec_result) } fn get_storage( @@ -300,16 +297,43 @@ where fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> Error { Error { code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Runtime trapped".into(), + message: "Runtime error".into(), data: Some(format!("{:?}", err).into()), } } +fn decode_hex>(from: H, name: &str) -> Result { + from.try_into().map_err(|_| Error { + code: ErrorCode::InvalidParams, + message: format!("{:?} does not fit into the {} type", from, name), + data: None, + }) +} + +fn limit_gas(gas_limit: Weight) -> Result<()> { + if gas_limit > GAS_LIMIT { + Err(Error { + code: ErrorCode::InvalidParams, + message: format!( + "Requested gas limit is greater than maximum allowed: {} > {}", + gas_limit, GAS_LIMIT + ), + data: None, + }) + } else { + Ok(()) + } +} + #[cfg(test)] mod tests { use super::*; use sp_core::U256; + fn trim(json: &str) -> String { + json.chars().filter(|c| !c.is_whitespace()).collect() + } + #[test] fn call_request_should_serialize_deserialize_properly() { type Req = CallRequest; @@ -327,13 +351,84 @@ mod tests { } #[test] - fn result_should_serialize_deserialize_properly() { + fn instantiate_request_should_serialize_deserialize_properly() { + type Req = InstantiateRequest; + let req: Req = serde_json::from_str(r#" + { + "origin": "5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL", + "endowment": "0x88", + "gasLimit": 42, + "code": { "existing": "0x1122" }, + "data": "0x4299", + "salt": "0x9988" + } + "#).unwrap(); + + assert_eq!(req.origin, 
"5CiPPseXPECbkjWCa6MnjNokrgYjMqmKndv2rSnekmSK2DjL"); + assert_eq!(req.endowment.into_u256(), 0x88.into()); + assert_eq!(req.gas_limit.into_u256(), 42.into()); + assert_eq!(&*req.data, [0x42, 0x99].as_ref()); + assert_eq!(&*req.salt, [0x99, 0x88].as_ref()); + let code = match req.code { + Code::Existing(hash) => hash, + _ => panic!("json encoded an existing hash"), + }; + assert_eq!(&code, "0x1122"); + } + + #[test] + fn call_result_should_serialize_deserialize_properly() { + fn test(expected: &str) { + let res: ContractExecResult = serde_json::from_str(expected).unwrap(); + let actual = serde_json::to_string(&res).unwrap(); + assert_eq!(actual, trim(expected).as_str()); + } + test(r#"{ + "gasConsumed": 5000, + "debugMessage": "0x68656c704f6b", + "result": { + "Ok": { + "flags": 5, + "data": "0x1234" + } + } + }"#); + test(r#"{ + "gasConsumed": 3400, + "debugMessage": "0x68656c70457272", + "result": { + "Err": "BadOrigin" + } + }"#); + } + + #[test] + fn instantiate_result_should_serialize_deserialize_properly() { fn test(expected: &str) { - let res: RpcContractExecResult = serde_json::from_str(expected).unwrap(); + let res: ContractInstantiateResult = serde_json::from_str(expected).unwrap(); let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, expected); + assert_eq!(actual, trim(expected).as_str()); } - test(r#"{"gasConsumed":5000,"debugMessage":"helpOk","result":{"Ok":{"flags":5,"data":"0x1234"}}}"#); - test(r#"{"gasConsumed":3400,"debugMessage":"helpErr","result":{"Err":"BadOrigin"}}"#); + test(r#"{ + "gasConsumed": 5000, + "debugMessage": "0x68656c704f6b", + "result": { + "Ok": { + "result": { + "flags": 5, + "data": "0x1234" + }, + "accountId": "5CiPP", + "rentProjection": null + } + } + }"#); + test(r#"{ + "gasConsumed": 3400, + "debugMessage": "0x68656c70457272", + "result": { + "Err": "BadOrigin" + } + }"#); } } diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 
118ce038fc229..74c678f548741 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -128,14 +128,14 @@ where let mut contract = parity_wasm::builder::module() // deploy function (first internal function) .function() - .signature().with_return_type(None).build() + .signature().build() .with_body(def.deploy_body.unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty()) )) .build() // call function (second internal function) .function() - .signature().with_return_type(None).build() + .signature().build() .with_body(def.call_body.unwrap_or_else(|| FuncBody::new(Vec::new(), Instructions::empty()) )) @@ -147,7 +147,7 @@ where if let Some(body) = def.aux_body { let mut signature = contract .function() - .signature().with_return_type(None); + .signature(); for _ in 0 .. def.aux_arg_num { signature = signature.with_param(ValueType::I64); } @@ -166,7 +166,7 @@ where for func in def.imported_functions { let sig = parity_wasm::builder::signature() .with_params(func.params) - .with_return_type(func.return_type) + .with_results(func.return_type.into_iter().collect()) .build_sig(); let sig = contract.push_signature(sig); contract = contract.import() @@ -450,11 +450,11 @@ pub mod body { vec![Instruction::I32Const(current as i32)] }, DynInstr::RandomUnaligned(low, high) => { - let unaligned = rng.gen_range(*low, *high) | 1; + let unaligned = rng.gen_range(*low..*high) | 1; vec![Instruction::I32Const(unaligned as i32)] }, DynInstr::RandomI32(low, high) => { - vec![Instruction::I32Const(rng.gen_range(*low, *high))] + vec![Instruction::I32Const(rng.gen_range(*low..*high))] }, DynInstr::RandomI32Repeated(num) => { (&mut rng).sample_iter(Standard).take(*num).map(|val| @@ -469,19 +469,19 @@ pub mod body { .collect() }, DynInstr::RandomGetLocal(low, high) => { - vec![Instruction::GetLocal(rng.gen_range(*low, *high))] + vec![Instruction::GetLocal(rng.gen_range(*low..*high))] }, DynInstr::RandomSetLocal(low, high) => { - 
vec![Instruction::SetLocal(rng.gen_range(*low, *high))] + vec![Instruction::SetLocal(rng.gen_range(*low..*high))] }, DynInstr::RandomTeeLocal(low, high) => { - vec![Instruction::TeeLocal(rng.gen_range(*low, *high))] + vec![Instruction::TeeLocal(rng.gen_range(*low..*high))] }, DynInstr::RandomGetGlobal(low, high) => { - vec![Instruction::GetGlobal(rng.gen_range(*low, *high))] + vec![Instruction::GetGlobal(rng.gen_range(*low..*high))] }, DynInstr::RandomSetGlobal(low, high) => { - vec![Instruction::SetGlobal(rng.gen_range(*low, *high))] + vec![Instruction::SetGlobal(rng.gen_range(*low..*high))] }, } ) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 27f70dea8c598..be471ed0c72ea 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -32,17 +32,52 @@ use frame_support::{ weights::Weight, ensure, }; -use pallet_contracts_primitives::{ErrorOrigin, ExecError, ExecReturnValue, ExecResult, ReturnFlags}; +use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; pub type AccountIdOf = ::AccountId; pub type MomentOf = <::Time as Time>::Moment; pub type SeedOf = ::Hash; pub type BlockNumberOf = ::BlockNumber; pub type StorageKey = [u8; 32]; +pub type ExecResult = Result; /// A type that represents a topic of an event. At the moment a hash is used. pub type TopicOf = ::Hash; +/// Origin of the error. +/// +/// Call or instantiate both called into other contracts and pass through errors happening +/// in those to the caller. This enum is for the caller to distinguish whether the error +/// happened during the execution of the callee or in the current execution context. +#[cfg_attr(test, derive(Debug, PartialEq))] +pub enum ErrorOrigin { + /// Caller error origin. + /// + /// The error happened in the current exeuction context rather than in the one + /// of the contract that is called into. + Caller, + /// The error happened during execution of the called contract. + Callee, +} + +/// Error returned by contract exection. 
+#[cfg_attr(test, derive(Debug, PartialEq))] +pub struct ExecError { + /// The reason why the execution failed. + pub error: DispatchError, + /// Origin of the error. + pub origin: ErrorOrigin, +} + +impl> From for ExecError { + fn from(error: T) -> Self { + Self { + error: error.into(), + origin: ErrorOrigin::Caller, + } + } +} + /// Information needed for rent calculations that can be requested by a contract. #[derive(codec::Encode)] #[cfg_attr(test, derive(Debug, PartialEq))] @@ -937,7 +972,7 @@ mod tests { use super::*; use crate::{ gas::GasMeter, tests::{ExtBuilder, Test, Event as MetaEvent}, - storage::Storage, + storage::{Storage, ContractAbsentError}, tests::{ ALICE, BOB, CHARLIE, test_utils::{place_contract, set_balance, get_balance}, @@ -945,6 +980,8 @@ mod tests { exec::ExportedFunction::*, Error, Weight, CurrentSchedule, }; + use sp_core::Bytes; + use frame_support::assert_noop; use sp_runtime::DispatchError; use assert_matches::assert_matches; use std::{cell::RefCell, collections::HashMap, rc::Rc}; @@ -1122,7 +1159,7 @@ mod tests { } fn exec_success() -> ExecResult { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) } #[test] @@ -1185,7 +1222,7 @@ mod tests { let return_ch = MockLoader::insert( Call, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Vec::new() }) + |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(Vec::new()) }) ); ExtBuilder::default().build().execute_with(|| { @@ -1245,7 +1282,7 @@ mod tests { let dest = BOB; let return_ch = MockLoader::insert( Call, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }) + |_, _| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![1, 2, 3, 4]) }) ); ExtBuilder::default().build().execute_with(|| { @@ -1262,7 +1299,7 @@ mod tests { let output = result.unwrap(); assert!(output.0.is_success()); - assert_eq!(output.0.data, vec![1, 
2, 3, 4]); + assert_eq!(output.0.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1274,7 +1311,7 @@ mod tests { let dest = BOB; let return_ch = MockLoader::insert( Call, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![1, 2, 3, 4] }) + |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![1, 2, 3, 4]) }) ); ExtBuilder::default().build().execute_with(|| { @@ -1291,7 +1328,7 @@ mod tests { let output = result.unwrap(); assert!(!output.0.is_success()); - assert_eq!(output.0.data, vec![1, 2, 3, 4]); + assert_eq!(output.0.data, Bytes(vec![1, 2, 3, 4])); }); } @@ -1511,7 +1548,7 @@ mod tests { fn instantiation_work_with_success_output() { let dummy_ch = MockLoader::insert( Constructor, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: vec![80, 65, 83, 83] }) + |_, _| Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(vec![80, 65, 83, 83]) }) ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -1531,7 +1568,7 @@ mod tests { vec![], &[], ), - Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address + Ok((address, ref output)) if output.data == Bytes(vec![80, 65, 83, 83]) => address ); // Check that the newly created account has the expected code hash and @@ -1547,7 +1584,7 @@ mod tests { fn instantiation_fails_with_failing_output() { let dummy_ch = MockLoader::insert( Constructor, - |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: vec![70, 65, 73, 76] }) + |_, _| Ok(ExecReturnValue { flags: ReturnFlags::REVERT, data: Bytes(vec![70, 65, 73, 76]) }) ); ExtBuilder::default().existential_deposit(15).build().execute_with(|| { @@ -1567,11 +1604,14 @@ mod tests { vec![], &[], ), - Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address + Ok((address, ref output)) if output.data == Bytes(vec![70, 65, 73, 76]) => address ); // Check that the account has not been created. 
- assert!(Storage::::code_hash(&instantiated_contract_address).is_err()); + assert_noop!( + Storage::::code_hash(&instantiated_contract_address), + ContractAbsentError, + ); assert!(events().is_empty()); }); } diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 80e608b217bd3..31cc5fad30c93 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Config, Error}; +use crate::{Config, Error, exec::ExecError}; use sp_std::marker::PhantomData; use sp_runtime::traits::Zero; use frame_support::{ @@ -24,7 +24,6 @@ use frame_support::{ }, weights::Weight, }; -use pallet_contracts_primitives::ExecError; use sp_core::crypto::UncheckedFrom; #[cfg(test)] @@ -128,10 +127,7 @@ where } let amount = token.calculate_amount(metadata); - let new_value = match self.gas_left.checked_sub(amount) { - None => None, - Some(val) => Some(val), - }; + let new_value = self.gas_left.checked_sub(amount); // We always consume the gas even if there is not enough gas. self.gas_left = new_value.unwrap_or_else(Zero::zero); diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 46947ea9e1aef..2aa6b8f2ec7b9 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -112,7 +112,7 @@ use crate::{ weights::WeightInfo, wasm::PrefabWasmModule, }; -use sp_core::crypto::UncheckedFrom; +use sp_core::{Bytes, crypto::UncheckedFrom}; use sp_std::prelude::*; use sp_runtime::{ traits::{ @@ -127,6 +127,7 @@ use frame_support::{ use frame_system::Pallet as System; use pallet_contracts_primitives::{ RentProjectionResult, GetStorageResult, ContractAccessError, ContractExecResult, + ContractInstantiateResult, Code, InstantiateReturnValue, }; type CodeHash = ::Hash; @@ -666,8 +667,8 @@ where { /// Perform a call to a specified contract. 
/// - /// This function is similar to `Self::call`, but doesn't perform any address lookups and better - /// suitable for calling directly from Rust. + /// This function is similar to [`Self::call`], but doesn't perform any address lookups + /// and better suitable for calling directly from Rust. /// /// It returns the execution result and the amount of used weight. pub fn bare_call( @@ -683,8 +684,65 @@ where let result = ctx.call(dest, value, &mut gas_meter, input_data); let gas_consumed = gas_meter.gas_spent(); ContractExecResult { - exec_result: result.map(|r| r.0).map_err(|r| r.0), + result: result.map(|r| r.0).map_err(|r| r.0.error), gas_consumed, + debug_message: Bytes(Vec::new()), + } + } + + /// Instantiate a new contract. + /// + /// This function is similar to [`Self::instantiate`], but doesn't perform any address lookups + /// and better suitable for calling directly from Rust. + /// + /// It returns the execution result, account id and the amount of used weight. + /// + /// If `compute_projection` is set to `true` the result also contains the rent projection. + /// This is optional because some non trivial and stateful work is performed to compute + /// the projection. See [`Self::rent_projection`]. 
+ pub fn bare_instantiate( + origin: T::AccountId, + endowment: BalanceOf, + gas_limit: Weight, + code: Code>, + data: Vec, + salt: Vec, + compute_projection: bool, + ) -> ContractInstantiateResult { + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = >::get(); + let mut ctx = ExecutionContext::>::top_level(origin, &schedule); + let executable = match code { + Code::Upload(Bytes(binary)) => PrefabWasmModule::from_code(binary, &schedule), + Code::Existing(hash) => PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter), + }; + let executable = match executable { + Ok(executable) => executable, + Err(error) => return ContractInstantiateResult { + result: Err(error.into()), + gas_consumed: gas_meter.gas_spent(), + debug_message: Bytes(Vec::new()), + } + }; + let result = ctx.instantiate(endowment, &mut gas_meter, executable, data, &salt) + .and_then(|(account_id, result)| { + let rent_projection = if compute_projection { + Some(Rent::>::compute_projection(&account_id) + .map_err(|_| >::NewContractNotFunded)?) + } else { + None + }; + + Ok(InstantiateReturnValue { + result, + account_id, + rent_projection, + }) + }); + ContractInstantiateResult { + result: result.map_err(|e| e.error), + gas_consumed: gas_meter.gas_spent(), + debug_message: Bytes(Vec::new()), } } diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs index 2e10f4b7ff685..4fc138d3f3daf 100644 --- a/frame/contracts/src/migration.rs +++ b/frame/contracts/src/migration.rs @@ -26,16 +26,12 @@ pub fn migrate() -> Weight { Some(version) if version == PalletVersion::new(3, 0, 0) => { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); let _ = >::translate::(|version| { - if let Some(version) = version { - Some(Schedule { + version.map(|version| Schedule { version: version.saturating_add(1), // Default limits were not decreased. Therefore it is OK to overwrite // the schedule with the new defaults. .. 
Default::default() }) - } else { - None - } }); } _ => (), diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 8605451ad1ee7..6e268c48bc824 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -388,7 +388,7 @@ where None | Some(ContractInfo::Tombstone(_)) => return Err(IsTombstone), Some(ContractInfo::Alive(contract)) => contract, }; - let module = PrefabWasmModule::from_storage_noinstr(alive_contract_info.code_hash) + let module = >::from_storage_noinstr(alive_contract_info.code_hash) .map_err(|_| IsTombstone)?; let code_size = module.occupied_storage(); let current_block_number = >::block_number(); @@ -399,15 +399,15 @@ where &alive_contract_info, code_size, ); + + // We skip the eviction in case one is in order. + // Evictions should only be performed by [`try_eviction`]. let new_contract_info = Self::enact_verdict( - account, alive_contract_info, current_block_number, verdict, Some(module), + account, alive_contract_info, current_block_number, verdict, None, ); // Check what happened after enaction of the verdict. - let alive_contract_info = match new_contract_info.map_err(|_| IsTombstone)? { - None => return Err(IsTombstone), - Some(contract) => contract, - }; + let alive_contract_info = new_contract_info.map_err(|_| IsTombstone)?.ok_or_else(|| IsTombstone)?; // Compute how much would the fee per block be with the *updated* balance. 
let total_balance = T::Currency::total_balance(account); diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 5fb637f3e9f18..a36e96dfe12b9 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -30,6 +30,7 @@ use crate::{ }; use assert_matches::assert_matches; use codec::Encode; +use sp_core::Bytes; use sp_runtime::{ traits::{BlakeTwo256, Hash, IdentityLookup, Convert}, testing::{Header, H256}, @@ -222,6 +223,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_balances::Config for Test { type MaxLocks = (); @@ -834,16 +836,16 @@ fn signed_claim_surcharge_contract_removals() { #[test] fn claim_surcharge_malus() { // Test surcharge malus for inherent - claim_surcharge(9, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); claim_surcharge(7, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); - claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), false); + claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), true); + claim_surcharge(5, |addr| Contracts::claim_surcharge(Origin::none(), addr, Some(ALICE)).is_ok(), false); // Test surcharge malus for signed - claim_surcharge(9, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), true); - claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); + claim_surcharge(8, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), true); claim_surcharge(7, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); claim_surcharge(6, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, 
None).is_ok(), false); + claim_surcharge(5, |addr| Contracts::claim_surcharge(Origin::signed(ALICE), addr, None).is_ok(), false); } /// Claim surcharge with the given trigger_call at the given blocks. @@ -1730,7 +1732,7 @@ fn self_destruct_works() { EventRecord { phase: Phase::Initialization, event: Event::pallet_balances( - pallet_balances::Event::Transfer(addr.clone(), DJANGO, 93_654) + pallet_balances::Event::Transfer(addr.clone(), DJANGO, 93_086) ), topics: vec![], }, @@ -1753,7 +1755,7 @@ fn self_destruct_works() { // check that the beneficiary (django) got remaining balance // some rent was deducted before termination - assert_eq!(Balances::free_balance(DJANGO), 1_093_654); + assert_eq!(Balances::free_balance(DJANGO), 1_093_086); }); } @@ -1885,7 +1887,7 @@ fn crypto_hashes() { 0, GAS_LIMIT, params, - ).exec_result.unwrap(); + ).result.unwrap(); assert!(result.is_success()); let expected = hash_fn(input.as_ref()); assert_eq!(&result.data[..*expected_size], &*expected); @@ -1920,7 +1922,7 @@ fn transfer_return_code() { 0, GAS_LIMIT, vec![], - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence @@ -1934,7 +1936,7 @@ fn transfer_return_code() { 0, GAS_LIMIT, vec![], - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); }); } @@ -1968,7 +1970,7 @@ fn call_return_code() { 0, GAS_LIMIT, AsRef::<[u8]>::as_ref(&DJANGO).to_vec(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::NotCallable); assert_ok!( @@ -1991,7 +1993,7 @@ fn call_return_code() { 0, GAS_LIMIT, AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go 
below the subsistence @@ -2005,7 +2007,7 @@ fn call_return_code() { 0, GAS_LIMIT, AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&0u32.to_le_bytes()).cloned().collect(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but callee reverts because "1" is passed. @@ -2016,7 +2018,7 @@ fn call_return_code() { 0, GAS_LIMIT, AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&1u32.to_le_bytes()).cloned().collect(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. @@ -2026,7 +2028,7 @@ fn call_return_code() { 0, GAS_LIMIT, AsRef::<[u8]>::as_ref(&addr_django).iter().chain(&2u32.to_le_bytes()).cloned().collect(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); @@ -2073,7 +2075,7 @@ fn instantiate_return_code() { 0, GAS_LIMIT, callee_hash.clone(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::BelowSubsistenceThreshold); // Contract has enough total balance in order to not go below the subsistence @@ -2087,7 +2089,7 @@ fn instantiate_return_code() { 0, GAS_LIMIT, callee_hash.clone(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::TransferFailed); // Contract has enough balance but the passed code hash is invalid @@ -2098,7 +2100,7 @@ fn instantiate_return_code() { 0, GAS_LIMIT, vec![0; 33], - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CodeNotFound); // Contract has enough balance but callee reverts because "1" is passed. 
@@ -2108,7 +2110,7 @@ fn instantiate_return_code() { 0, GAS_LIMIT, callee_hash.iter().chain(&1u32.to_le_bytes()).cloned().collect(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeReverted); // Contract has enough balance but callee traps because "2" is passed. @@ -2118,7 +2120,7 @@ fn instantiate_return_code() { 0, GAS_LIMIT, callee_hash.iter().chain(&2u32.to_le_bytes()).cloned().collect(), - ).exec_result.unwrap(); + ).result.unwrap(); assert_return_code!(result, RuntimeReturnCode::CalleeTrapped); }); @@ -2208,7 +2210,7 @@ fn chain_extension_works() { ); let gas_consumed = result.gas_consumed; assert_eq!(TestExtension::last_seen_buffer(), vec![0, 99]); - assert_eq!(result.exec_result.unwrap().data, vec![0, 99]); + assert_eq!(result.result.unwrap().data, Bytes(vec![0, 99])); // 1 = treat inputs as integer primitives and store the supplied integers Contracts::bare_call( @@ -2217,7 +2219,7 @@ fn chain_extension_works() { 0, GAS_LIMIT, vec![1], - ).exec_result.unwrap(); + ).result.unwrap(); // those values passed in the fixture assert_eq!(TestExtension::last_seen_inputs(), (4, 1, 16, 12)); @@ -2229,7 +2231,7 @@ fn chain_extension_works() { GAS_LIMIT, vec![2, 42], ); - assert_ok!(result.exec_result); + assert_ok!(result.result); assert_eq!(result.gas_consumed, gas_consumed + 42); // 3 = diverging chain extension call that sets flags to 0x1 and returns a fixed buffer @@ -2239,9 +2241,9 @@ fn chain_extension_works() { 0, GAS_LIMIT, vec![3], - ).exec_result.unwrap(); + ).result.unwrap(); assert_eq!(result.flags, ReturnFlags::REVERT); - assert_eq!(result.data, vec![42, 99]); + assert_eq!(result.data, Bytes(vec![42, 99])); }); } @@ -2766,7 +2768,7 @@ fn reinstrument_does_charge() { GAS_LIMIT, zero.clone(), ); - assert!(result0.exec_result.unwrap().is_success()); + assert!(result0.result.unwrap().is_success()); let result1 = Contracts::bare_call( ALICE, @@ -2775,7 +2777,7 @@ fn reinstrument_does_charge() { GAS_LIMIT, 
zero.clone(), ); - assert!(result1.exec_result.unwrap().is_success()); + assert!(result1.result.unwrap().is_success()); // They should match because both where called with the same schedule. assert_eq!(result0.gas_consumed, result1.gas_consumed); @@ -2793,7 +2795,7 @@ fn reinstrument_does_charge() { GAS_LIMIT, zero.clone(), ); - assert!(result2.exec_result.unwrap().is_success()); + assert!(result2.result.unwrap().is_success()); assert!(result2.gas_consumed > result1.gas_consumed); assert_eq!( result2.gas_consumed, diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index cfb529d2932b6..10d61bab1bb2c 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -28,15 +28,15 @@ macro_rules! convert_args { macro_rules! gen_signature { ( ( $( $params: ty ),* ) ) => ( { - parity_wasm::elements::FunctionType::new(convert_args!($($params),*), None) + parity_wasm::elements::FunctionType::new(convert_args!($($params),*), vec![]) } ); ( ( $( $params: ty ),* ) -> $returns: ty ) => ( { - parity_wasm::elements::FunctionType::new(convert_args!($($params),*), Some({ + parity_wasm::elements::FunctionType::new(convert_args!($($params),*), vec![{ use $crate::wasm::env_def::ConvertibleToWasm; <$returns>::VALUE_TYPE - })) + }]) } ); } @@ -301,12 +301,12 @@ mod tests { fn macro_gen_signature() { assert_eq!( gen_signature!((i32)), - FunctionType::new(vec![ValueType::I32], None), + FunctionType::new(vec![ValueType::I32], vec![]), ); assert_eq!( gen_signature!( (i32, u32) -> u32 ), - FunctionType::new(vec![ValueType::I32, ValueType::I32], Some(ValueType::I32)), + FunctionType::new(vec![ValueType::I32, ValueType::I32], vec![ValueType::I32]), ); } @@ -348,10 +348,10 @@ mod tests { ); assert!( - Env::can_satisfy(b"seal0", b"seal_gas",&FunctionType::new(vec![ValueType::I32], None)) + Env::can_satisfy(b"seal0", b"seal_gas",&FunctionType::new(vec![ValueType::I32], vec![])) ); assert!( - 
!Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], None)) + !Env::can_satisfy(b"seal0", b"not_exists", &FunctionType::new(vec![], vec![])) ); } } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index f7fde5ba17861..969336b59fa39 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -27,14 +27,13 @@ mod runtime; use crate::{ CodeHash, Schedule, Config, wasm::env_def::FunctionImplProvider, - exec::{Ext, Executable, ExportedFunction}, + exec::{Ext, Executable, ExportedFunction, ExecResult}, gas::GasMeter, }; use sp_std::prelude::*; use sp_core::crypto::UncheckedFrom; use codec::{Encode, Decode}; use frame_support::dispatch::DispatchError; -use pallet_contracts_primitives::ExecResult; pub use self::runtime::{ReturnCode, Runtime, RuntimeToken}; #[cfg(feature = "runtime-benchmarks")] pub use self::code_cache::reinstrument; @@ -246,17 +245,20 @@ mod tests { use super::*; use crate::{ CodeHash, BalanceOf, Error, Pallet as Contracts, - exec::{Ext, StorageKey, AccountIdOf, Executable, SeedOf, BlockNumberOf, RentParams}, + exec::{ + Ext, StorageKey, AccountIdOf, Executable, SeedOf, BlockNumberOf, + RentParams, ExecError, ErrorOrigin, + }, gas::GasMeter, tests::{Test, Call, ALICE, BOB}, }; use std::collections::HashMap; - use sp_core::H256; + use sp_core::{Bytes, H256}; use hex_literal::hex; use sp_runtime::DispatchError; - use frame_support::{dispatch::DispatchResult, weights::Weight}; + use frame_support::{assert_ok, dispatch::DispatchResult, weights::Weight}; use assert_matches::assert_matches; - use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags, ExecError, ErrorOrigin}; + use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; use pretty_assertions::assert_eq; const GAS_LIMIT: Weight = 10_000_000_000; @@ -336,7 +338,7 @@ mod tests { Contracts::::contract_address(&ALICE, &code_hash, salt), ExecReturnValue { flags: ReturnFlags::empty(), - data: Vec::new(), + data: 
Bytes(Vec::new()), }, 0, )) @@ -367,7 +369,7 @@ mod tests { }); // Assume for now that it was just a plain transfer. // TODO: Add tests for different call outcomes. - Ok((ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }, 0)) + Ok((ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }, 0)) } fn terminate( &mut self, @@ -597,12 +599,12 @@ mod tests { #[test] fn contract_transfer() { let mut mock_ext = MockExt::default(); - let _ = execute( + assert_ok!(execute( CODE_TRANSFER, vec![], &mut mock_ext, &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + )); assert_eq!( &mock_ext.transfers, @@ -663,12 +665,12 @@ mod tests { #[test] fn contract_call() { let mut mock_ext = MockExt::default(); - let _ = execute( + assert_ok!(execute( CODE_CALL, vec![], &mut mock_ext, &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + )); assert_eq!( &mock_ext.transfers, @@ -739,12 +741,12 @@ mod tests { #[test] fn contract_instantiate() { let mut mock_ext = MockExt::default(); - let _ = execute( + assert_ok!(execute( CODE_INSTANTIATE, vec![], &mut mock_ext, &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + )); assert_matches!( &mock_ext.instantiates[..], @@ -851,12 +853,12 @@ mod tests { #[test] fn contract_call_limited_gas() { let mut mock_ext = MockExt::default(); - let _ = execute( + assert_ok!(execute( &CODE_TRANSFER_LIMITED_GAS, vec![], &mut mock_ext, &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + )); assert_eq!( &mock_ext.transfers, @@ -946,7 +948,10 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: [0x22; 32].to_vec() }); + assert_eq!(output, ExecReturnValue { + flags: ReturnFlags::empty(), + data: Bytes([0x22; 32].to_vec()) + }); } /// calls `seal_caller` and compares the result with the constant 42. 
@@ -994,12 +999,12 @@ mod tests { #[test] fn caller() { - let _ = execute( + assert_ok!(execute( CODE_CALLER, vec![], MockExt::default(), &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + )); } /// calls `seal_address` and compares the result with the constant 69. @@ -1047,12 +1052,12 @@ mod tests { #[test] fn address() { - let _ = execute( + assert_ok!(execute( CODE_ADDRESS, vec![], MockExt::default(), &mut GasMeter::new(GAS_LIMIT), - ).unwrap(); + )); } const CODE_BALANCE: &str = r#" @@ -1099,12 +1104,12 @@ mod tests { #[test] fn balance() { let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_BALANCE, vec![], MockExt::default(), &mut gas_meter, - ).unwrap(); + )); } const CODE_GAS_PRICE: &str = r#" @@ -1151,12 +1156,12 @@ mod tests { #[test] fn gas_price() { let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_GAS_PRICE, vec![], MockExt::default(), &mut gas_meter, - ).unwrap(); + )); } const CODE_GAS_LEFT: &str = r#" @@ -1209,7 +1214,7 @@ mod tests { &mut gas_meter, ).unwrap(); - let gas_left = Weight::decode(&mut output.data.as_slice()).unwrap(); + let gas_left = Weight::decode(&mut &*output.data).unwrap(); assert!(gas_left < GAS_LIMIT, "gas_left must be less than initial"); assert!(gas_left > gas_meter.gas_left(), "gas_left must be greater than final"); } @@ -1258,12 +1263,12 @@ mod tests { #[test] fn value_transferred() { let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_VALUE_TRANSFERRED, vec![], MockExt::default(), &mut gas_meter, - ).unwrap(); + )); } const CODE_RETURN_FROM_START_FN: &str = r#" @@ -1299,7 +1304,13 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: vec![1, 2, 3, 4] }); + assert_eq!( + output, + ExecReturnValue { + flags: ReturnFlags::empty(), + data: Bytes(vec![1, 2, 3, 4]) + } + ); } const CODE_TIMESTAMP_NOW: &str = r#" @@ -1346,12 +1357,12 @@ 
mod tests { #[test] fn now() { let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_TIMESTAMP_NOW, vec![], MockExt::default(), &mut gas_meter, - ).unwrap(); + )); } const CODE_MINIMUM_BALANCE: &str = r#" @@ -1397,12 +1408,12 @@ mod tests { #[test] fn minimum_balance() { let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_MINIMUM_BALANCE, vec![], MockExt::default(), &mut gas_meter, - ).unwrap(); + )); } const CODE_TOMBSTONE_DEPOSIT: &str = r#" @@ -1448,12 +1459,12 @@ mod tests { #[test] fn tombstone_deposit() { let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_TOMBSTONE_DEPOSIT, vec![], MockExt::default(), &mut gas_meter, - ).unwrap(); + )); } const CODE_RANDOM: &str = r#" @@ -1526,7 +1537,10 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F").to_vec(), + data: Bytes( + hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F") + .to_vec() + ), }, ); } @@ -1601,10 +1615,10 @@ mod tests { output, ExecReturnValue { flags: ReturnFlags::empty(), - data: ( + data: Bytes(( hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F"), 42u64, - ).encode(), + ).encode()), }, ); } @@ -1637,12 +1651,12 @@ mod tests { fn deposit_event() { let mut mock_ext = MockExt::default(); let mut gas_meter = GasMeter::new(GAS_LIMIT); - let _ = execute( + assert_ok!(execute( CODE_DEPOSIT_EVENT, vec![], &mut mock_ext, &mut gas_meter - ).unwrap(); + )); assert_eq!(mock_ext.events, vec![ (vec![H256::repeat_byte(0x33)], @@ -1837,7 +1851,10 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: hex!("445566778899").to_vec() }); + assert_eq!(output, ExecReturnValue { + flags: ReturnFlags::empty(), + data: Bytes(hex!("445566778899").to_vec()), + }); assert!(output.is_success()); } 
@@ -1850,7 +1867,10 @@ mod tests { &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - assert_eq!(output, ExecReturnValue { flags: ReturnFlags::REVERT, data: hex!("5566778899").to_vec() }); + assert_eq!(output, ExecReturnValue { + flags: ReturnFlags::REVERT, + data: Bytes(hex!("5566778899").to_vec()), + }); assert!(!output.is_success()); } @@ -1962,7 +1982,7 @@ mod tests { MockExt::default(), &mut GasMeter::new(GAS_LIMIT), ).unwrap(); - let rent_params = >::default().encode(); + let rent_params = Bytes(>::default().encode()); assert_eq!(output, ExecReturnValue { flags: ReturnFlags::empty(), data: rent_params }); } } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index 15556b0c5cd06..633edd4aaf8a4 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -152,8 +152,8 @@ impl<'a, T: Config> ContractModule<'a, T> { for wasm_type in type_section.types() { match wasm_type { Type::Function(func_type) => { - let return_type = func_type.return_type(); - for value_type in func_type.params().iter().chain(return_type.iter()) { + let return_type = func_type.results().get(0); + for value_type in func_type.params().iter().chain(return_type) { match value_type { ValueType::F32 | ValueType::F64 => return Err("use of floating point type in function types is forbidden"), @@ -273,15 +273,17 @@ impl<'a, T: Config> ContractModule<'a, T> { // Then check the signature. // Both "call" and "deploy" has a () -> () function type. + // We still support () -> (i32) for backwards compatibility. let func_ty_idx = func_entries.get(fn_idx as usize) .ok_or_else(|| "export refers to non-existent function")? 
.type_ref(); let Type::Function(ref func_ty) = types .get(func_ty_idx as usize) .ok_or_else(|| "function has a non-existent type")?; - if !func_ty.params().is_empty() || - !(func_ty.return_type().is_none() || - func_ty.return_type() == Some(ValueType::I32)) { + if !( + func_ty.params().is_empty() && + (func_ty.results().is_empty() || func_ty.results() == [ValueType::I32]) + ) { return Err("entry point has wrong signature"); } } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index f3757e4c2b10d..bed56f409d579 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -19,7 +19,7 @@ use crate::{ Config, CodeHash, BalanceOf, Error, - exec::{Ext, StorageKey, TopicOf}, + exec::{Ext, StorageKey, TopicOf, ExecResult, ExecError}, gas::{GasMeter, Token, ChargedAmount}, wasm::env_def::ConvertibleToWasm, schedule::HostFnWeights, @@ -29,14 +29,14 @@ use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weigh use sp_std::prelude::*; use codec::{Decode, DecodeAll, Encode}; use sp_runtime::traits::SaturatedConversion; -use sp_core::crypto::UncheckedFrom; +use sp_core::{Bytes, crypto::UncheckedFrom}; use sp_io::hashing::{ keccak_256, blake2_256, blake2_128, sha2_256, }; -use pallet_contracts_primitives::{ExecResult, ExecReturnValue, ReturnFlags, ExecError}; +use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags}; /// Every error that can be returned to a contract when it calls any of the host functions. 
/// @@ -347,19 +347,19 @@ where )?; Ok(ExecReturnValue { flags, - data, + data: Bytes(data), }) }, TrapReason::Termination => { Ok(ExecReturnValue { flags: ReturnFlags::empty(), - data: Vec::new(), + data: Bytes(Vec::new()), }) }, TrapReason::Restoration => { Ok(ExecReturnValue { flags: ReturnFlags::empty(), - data: Vec::new(), + data: Bytes(Vec::new()), }) }, TrapReason::SupervisorError(error) => Err(error)?, @@ -370,7 +370,7 @@ where match sandbox_result { // No traps were generated. Proceed normally. Ok(_) => { - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Bytes(Vec::new()) }) } // `Error::Module` is returned only if instantiation or linking failed (i.e. // wasm binary tried to import a function that is not provided by the host). @@ -596,7 +596,7 @@ where /// Fallible conversion of a `ExecResult` to `ReturnCode`. fn exec_into_return_code(from: ExecResult) -> Result { - use pallet_contracts_primitives::ErrorOrigin::Callee; + use crate::exec::ErrorOrigin::Callee; let ExecError { error, origin } = match from { Ok(retval) => return Ok(retval.into()), diff --git a/frame/ddc-metrics-offchain-worker/src/lib.rs b/frame/ddc-metrics-offchain-worker/src/lib.rs index f6c87627cbeb5..8ec496fa467b3 100644 --- a/frame/ddc-metrics-offchain-worker/src/lib.rs +++ b/frame/ddc-metrics-offchain-worker/src/lib.rs @@ -328,11 +328,11 @@ impl Module where ::AccountId: AsRef<[u call_data, ); - let mut data = match &contract_exec_result.exec_result { + let mut data = match &contract_exec_result.result { Ok(v) => &v.data[..], Err(exec_error) => { // Return default value in case of error - warn!("[OCW] Error in call get_current_period_ms of smart contract. Return default value for period. Details: {:?}", exec_error.error); + warn!("[OCW] Error in call get_current_period_ms of smart contract. Return default value for period. 
Details: {:?}", exec_error); return Ok(Self::get_start_of_day_ms()); } }; @@ -566,12 +566,12 @@ impl Module where ::AccountId: AsRef<[u call_data, ); - let mut data = match &contract_exec_result.exec_result { + let mut data = match &contract_exec_result.result { Ok(v) => &v.data[..], Err(exec_error) => { warn!( "[OCW] Error in call get_all_ddc_nodes of smart contract. Error: {:?}", - exec_error.error + exec_error ); return Ok(Vec::new()); } diff --git a/frame/ddc-metrics-offchain-worker/src/tests/mod.rs b/frame/ddc-metrics-offchain-worker/src/tests/mod.rs index d61b9be8720a7..bb02ddda9edcf 100644 --- a/frame/ddc-metrics-offchain-worker/src/tests/mod.rs +++ b/frame/ddc-metrics-offchain-worker/src/tests/mod.rs @@ -334,7 +334,7 @@ fn should_run_contract() { 100_000_000_000, call_data, ); - match &contract_exec_result.exec_result { + match &contract_exec_result.result { Ok(res) => { //println!("XXX Contract returned {:?}", res.data); assert_eq!(res.data.len(), 8); // size of u64 diff --git a/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs b/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs index 2c5962e16d59f..82012aa5e116e 100644 --- a/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs +++ b/frame/ddc-metrics-offchain-worker/src/tests/test_runtime.rs @@ -66,6 +66,7 @@ parameter_types! 
{ pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); } + impl frame_system::Config for Test { type BaseCallFilter = (); type BlockWeights = (); @@ -90,6 +91,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } impl pallet_balances::Config for Test { diff --git a/frame/ddc-pallet b/frame/ddc-pallet index b06d6d703d1c7..3225f9ca5adf1 160000 --- a/frame/ddc-pallet +++ b/frame/ddc-pallet @@ -1 +1 @@ -Subproject commit b06d6d703d1c7b613ce8ff6cef84c9123f63a637 +Subproject commit 3225f9ca5adf128fbead2adf7d89e8fb522be6cd diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 40bc99ec12e01..78bf9863fd145 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -21,8 +21,9 @@ use super::*; use frame_benchmarking::{benchmarks, account, whitelist_account, impl_benchmark_test_suite}; use frame_support::{ - IterableStorageMap, - traits::{Currency, Get, EnsureOrigin, OnInitialize, UnfilteredDispatchable, schedule::DispatchTime}, + assert_noop, assert_ok, IterableStorageMap, + traits::{Currency, Get, EnsureOrigin, OnInitialize, UnfilteredDispatchable, + schedule::DispatchTime}, }; use frame_system::{RawOrigin, Pallet as System, self, EventRecord}; use sp_runtime::traits::{Bounded, One}; @@ -206,11 +207,14 @@ benchmarks! { let origin = T::CancellationOrigin::successful_origin(); let referendum_index = add_referendum::(0)?; let call = Call::::emergency_cancel(referendum_index); - assert!(Democracy::::referendum_status(referendum_index).is_ok()); + assert_ok!(Democracy::::referendum_status(referendum_index)); }: { call.dispatch_bypass_filter(origin)? 
} verify { // Referendum has been canceled - assert!(Democracy::::referendum_status(referendum_index).is_err()); + assert_noop!( + Democracy::::referendum_status(referendum_index), + Error::::ReferendumInvalid, + ); } blacklist { @@ -224,18 +228,23 @@ benchmarks! { // Place our proposal in the external queue, too. let hash = T::Hashing::hash_of(&0); - assert!(Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()).is_ok()); + assert_ok!( + Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()) + ); // Add a referendum of our proposal. let referendum_index = add_referendum::(0)?; - assert!(Democracy::::referendum_status(referendum_index).is_ok()); + assert_ok!(Democracy::::referendum_status(referendum_index)); let call = Call::::blacklist(hash, Some(referendum_index)); let origin = T::BlacklistOrigin::successful_origin(); }: { call.dispatch_bypass_filter(origin)? } verify { // Referendum has been canceled - assert!(Democracy::::referendum_status(referendum_index).is_err()); + assert_noop!( + Democracy::::referendum_status(referendum_index), + Error::::ReferendumInvalid + ); } // Worst case scenario, we external propose a previously blacklisted proposal diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index b3b37b0b34b68..351204bfcb58f 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -1653,10 +1653,7 @@ impl Module { // To decode the enum variant we only need the first byte. let mut buf = [0u8; 1]; let key = >::hashed_key_for(proposal_hash); - let bytes = match sp_io::storage::read(&key, &mut buf, 0) { - Some(bytes) => bytes, - None => return Err(Error::::NotImminent.into()), - }; + let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::::NotImminent)?; // The value may be smaller that 1 byte. 
let mut input = &buf[0..buf.len().min(bytes as usize)]; @@ -1684,10 +1681,7 @@ impl Module { // * at most 5 bytes to decode a `Compact` let mut buf = [0u8; 6]; let key = >::hashed_key_for(proposal_hash); - let bytes = match sp_io::storage::read(&key, &mut buf, 0) { - Some(bytes) => bytes, - None => return Err(Error::::PreimageMissing.into()), - }; + let bytes = sp_io::storage::read(&key, &mut buf, 0).ok_or_else(|| Error::::PreimageMissing)?; // The value may be smaller that 6 bytes. let mut input = &buf[0..buf.len().min(bytes as usize)]; @@ -1761,10 +1755,7 @@ impl Module { fn decode_compact_u32_at(key: &[u8]) -> Option { // `Compact` takes at most 5 bytes. let mut buf = [0u8; 5]; - let bytes = match sp_io::storage::read(&key, &mut buf, 0) { - Some(bytes) => bytes, - None => return None, - }; + let bytes = sp_io::storage::read(&key, &mut buf, 0)?; // The value may be smaller than 5 bytes. let mut input = &buf[0..buf.len().min(bytes as usize)]; match codec::Compact::::decode(&mut input) { diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 57e845ace9f24..73bbb5481dade 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -22,7 +22,7 @@ use super::*; use codec::Encode; use frame_support::{ assert_noop, assert_ok, parameter_types, ord_parameter_types, - traits::{Contains, OnInitialize, Filter}, + traits::{SortedMembers, OnInitialize, Filter}, weights::Weight, }; use sp_core::H256; @@ -103,6 +103,7 @@ impl frame_system::Config for Test { type OnKilledAccount = (); type SystemWeightInfo = (); type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { pub MaximumSchedulerWeight: Weight = Perbill::from_percent(80) * BlockWeights::get().max_block; @@ -119,9 +120,10 @@ impl pallet_scheduler::Config for Test { } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; + pub const MaxLocks: u32 = 10; } impl pallet_balances::Config for Test { - type MaxLocks = (); + type MaxLocks = MaxLocks; type Balance = u64; type Event = Event; type DustRemoval = (); @@ -150,7 +152,7 @@ ord_parameter_types! { pub const Six: u64 = 6; } pub struct OneToFive; -impl Contains for OneToFive { +impl SortedMembers for OneToFive { fn sorted_members() -> Vec { vec![1, 2, 3, 4, 5] } diff --git a/frame/democracy/src/tests/decoders.rs b/frame/democracy/src/tests/decoders.rs index 52b61d8d9e7d0..0331ea3934479 100644 --- a/frame/democracy/src/tests/decoders.rs +++ b/frame/democracy/src/tests/decoders.rs @@ -58,12 +58,12 @@ fn pre_image() { let key = Default::default(); let missing = PreimageStatus::Missing(0); Preimages::::insert(key, missing); - assert!(Democracy::pre_image_data_len(key).is_err()); + assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); assert_eq!(Democracy::check_pre_image_is_missing(key), Ok(())); Preimages::::remove(key); - assert!(Democracy::pre_image_data_len(key).is_err()); - assert!(Democracy::check_pre_image_is_missing(key).is_err()); + assert_noop!(Democracy::pre_image_data_len(key), Error::::PreimageMissing); + assert_noop!(Democracy::check_pre_image_is_missing(key), Error::::NotImminent); for l in vec![0, 10, 100, 1000u32] { let available = PreimageStatus::Available{ @@ -76,7 +76,8 @@ fn pre_image() { Preimages::::insert(key, available); assert_eq!(Democracy::pre_image_data_len(key), Ok(l)); - assert!(Democracy::check_pre_image_is_missing(key).is_err()); + assert_noop!(Democracy::check_pre_image_is_missing(key), + Error::::DuplicatePreimage); } }) } diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index ff1a7a87da852..37654a5e91462 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -93,7 +93,7 @@ fn external_blacklisting_should_work() { 
assert_ok!(Democracy::blacklist(Origin::root(), hash, None)); fast_forward_to(2); - assert!(Democracy::referendum_status(0).is_err()); + assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert_noop!( Democracy::external_propose( diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index 4785ef0a89467..4a4827ac7e9c1 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -129,9 +129,9 @@ fn blacklisting_should_work() { fast_forward_to(2); let hash = set_balance_proposal_hash(4); - assert!(Democracy::referendum_status(0).is_ok()); + assert_ok!(Democracy::referendum_status(0)); assert_ok!(Democracy::blacklist(Origin::root(), hash, Some(0))); - assert!(Democracy::referendum_status(0).is_err()); + assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); }); } diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 207085ceb570f..13072ebf87b11 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -80,12 +80,12 @@ fn single_proposal_should_work() { fast_forward_to(3); // referendum still running - assert!(Democracy::referendum_status(0).is_ok()); + assert_ok!(Democracy::referendum_status(0)); // referendum runs during 2 and 3, ends @ start of 4. fast_forward_to(4); - assert!(Democracy::referendum_status(0).is_err()); + assert_noop!(Democracy::referendum_status(0), Error::::ReferendumInvalid); assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); // referendum passes and wait another two blocks for enactment. 
diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index 4b5178faa8e86..dcb9c9b0e75b6 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -14,7 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] static_assertions = "1.1.0" -serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } log = { version = "0.4.14", default-features = false } @@ -48,7 +47,6 @@ frame-benchmarking = { version = "3.1.0", path = "../benchmarking" } [features] default = ["std"] std = [ - "serde", "codec/std", "log/std", diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index 40c7e801ae78d..90e90d427dc6e 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -208,7 +208,7 @@ frame_benchmarking::benchmarks! { // assume a queued solution is stored, regardless of where it comes from. >::put(ready_solution); }: { - let _ = as ElectionProvider>::elect(); + assert_ok!( as ElectionProvider>::elect()); } verify { assert!(>::queued_solution().is_none()); assert!(>::get().is_none()); diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 1609ffa3beef0..c59d68a33adba 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -381,11 +381,11 @@ impl Default for ElectionCompute { #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct RawSolution { /// Compact election edges. - compact: C, + pub compact: C, /// The _claimed_ score of the solution. - score: ElectionScore, + pub score: ElectionScore, /// The round at which this solution should be submitted. 
- round: u32, + pub round: u32, } impl Default for RawSolution { @@ -402,13 +402,13 @@ pub struct ReadySolution { /// /// This is target-major vector, storing each winners, total backing, and each individual /// backer. - supports: Supports, + pub supports: Supports, /// The score of the solution. /// /// This is needed to potentially challenge the solution. - score: ElectionScore, + pub score: ElectionScore, /// How this election was computed. - compute: ElectionCompute, + pub compute: ElectionCompute, } /// A snapshot of all the data that is needed for en entire round. They are provided by @@ -432,10 +432,10 @@ pub struct RoundSnapshot { pub struct SolutionOrSnapshotSize { /// The length of voters. #[codec(compact)] - voters: u32, + pub voters: u32, /// The length of targets. #[codec(compact)] - targets: u32, + pub targets: u32, } /// Internal errors of the pallet. @@ -534,12 +534,19 @@ pub mod pallet { /// Maximum number of iteration of balancing that will be executed in the embedded miner of /// the pallet. type MinerMaxIterations: Get; + /// Maximum weight that the miner should consume. /// /// The miner will ensure that the total weight of the unsigned solution will not exceed - /// this values, based on [`WeightInfo::submit_unsigned`]. + /// this value, based on [`WeightInfo::submit_unsigned`]. type MinerMaxWeight: Get; + /// Maximum length (bytes) that the mined solution should consume. + /// + /// The miner will ensure that the total length of the unsigned solution will not exceed + /// this value. + type MinerMaxLength: Get; + /// Something that will provide the election data. 
type DataProvider: ElectionDataProvider; @@ -1411,7 +1418,7 @@ mod tests { roll_to(30); assert!(MultiPhase::current_phase().is_signed()); - let _ = MultiPhase::elect().unwrap(); + assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); assert!(MultiPhase::snapshot().is_none()); @@ -1434,7 +1441,7 @@ mod tests { assert!(MultiPhase::current_phase().is_off()); // this module is now only capable of doing on-chain backup. - let _ = MultiPhase::elect().unwrap(); + assert_ok!(MultiPhase::elect()); assert!(MultiPhase::current_phase().is_off()); }); diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 22b5a0ac67b7a..79e6e952bfec8 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -65,7 +65,7 @@ pub(crate) type TargetIndex = u16; sp_npos_elections::generate_solution_type!( #[compact] - pub struct TestCompact::(16) + pub struct TestCompact::(16) ); /// All events of this pallet. @@ -161,6 +161,7 @@ impl frame_system::Config for Runtime { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); + type OnSetCode = (); } const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); @@ -204,6 +205,7 @@ parameter_types! 
{ pub static MinerTxPriority: u64 = 100; pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; + pub static MinerMaxLength: u32 = 256; pub static MockWeightInfo: bool = false; @@ -276,6 +278,7 @@ impl crate::Config for Runtime { type SolutionImprovementThreshold = SolutionImprovementThreshold; type MinerMaxIterations = MinerMaxIterations; type MinerMaxWeight = MinerMaxWeight; + type MinerMaxLength = MinerMaxLength; type MinerTxPriority = MinerTxPriority; type DataProvider = StakingMock; type WeightInfo = DualMockWeightInfo; diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index b570c4482814b..26e51cf58b34b 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -46,6 +46,8 @@ pub enum MinerError { PreDispatchChecksFailed, /// The solution generated from the miner is not feasible. Feasibility(FeasibilityError), + /// There are no more voters to remove to trim the solution. + NoMoreVoters, } impl From for MinerError { @@ -168,13 +170,22 @@ impl Pallet { size, T::MinerMaxWeight::get(), ); + log!( debug, - "miner: current compact solution voters = {}, maximum_allowed = {}", + "initial solution voters = {}, snapshot = {:?}, maximum_allowed(capped) = {}", compact.voter_count(), + size, maximum_allowed_voters, ); - let compact = Self::trim_compact(maximum_allowed_voters, compact, &voter_index)?; + + // trim length and weight + let compact = Self::trim_compact_weight(maximum_allowed_voters, compact, &voter_index)?; + let compact = Self::trim_compact_length( + T::MinerMaxLength::get(), + compact, + &voter_index, + )?; // re-calc score. let winners = sp_npos_elections::to_without_backing(winners); @@ -217,7 +228,7 @@ impl Pallet { /// /// Indeed, the score must be computed **after** this step. 
If this step reduces the score too /// much or remove a winner, then the solution must be discarded **after** this step. - pub fn trim_compact( + pub fn trim_compact_weight( maximum_allowed_voters: u32, mut compact: CompactOf, voter_index: FN, @@ -252,15 +263,61 @@ impl Pallet { } } + log!(debug, "removed {} voter to meet the max weight limit.", to_remove); Ok(compact) } _ => { // nada, return as-is + log!(debug, "didn't remove any voter for weight limits."); Ok(compact) } } } + /// Greedily reduce the size of the solution to fit into the block w.r.t length. + /// + /// The length of the solution is largely a function of the number of voters. The number of + /// winners cannot be changed. Thus, to reduce the solution size, we need to strip voters. + /// + /// Note that this solution is already computed, and winners are elected based on the merit of + /// the total stake in the system. Nevertheless, some of the voters may be removed here. + /// + /// Sometimes, removing a voter can cause a validator to also be implicitly removed, if + /// that voter was the only backer of that winner. In such cases, this solution is invalid, which + /// will be caught prior to submission. + /// + /// The score must be computed **after** this step. If this step reduces the score too much, + /// then the solution must be discarded. + pub fn trim_compact_length( + max_allowed_length: u32, + mut compact: CompactOf, + voter_index: impl Fn(&T::AccountId) -> Option>, + ) -> Result, MinerError> { + // short-circuit to avoid getting the voters if possible + // this involves a redundant encoding, but that should hopefully be relatively cheap + if (compact.encoded_size().saturated_into::()) <= max_allowed_length { + return Ok(compact); + } + + // grab all voters and sort them by least stake. + let RoundSnapshot { voters, .. 
} = + Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; + let mut voters_sorted = voters + .into_iter() + .map(|(who, stake, _)| (who.clone(), stake)) + .collect::>(); + voters_sorted.sort_by_key(|(_, y)| *y); + voters_sorted.reverse(); + + while compact.encoded_size() > max_allowed_length.saturated_into() { + let (smallest_stake_voter, _) = voters_sorted.pop().ok_or(MinerError::NoMoreVoters)?; + let index = voter_index(&smallest_stake_voter).ok_or(MinerError::SnapshotUnAvailable)?; + compact.remove_voter(index); + } + + Ok(compact) + } + /// Find the maximum `len` that a compact can have in order to fit into the block weight. /// /// This only returns a value between zero and `size.nominators`. @@ -298,6 +355,7 @@ impl Pallet { // First binary-search the right amount of voters let mut step = voters / 2; let mut current_weight = weight_with(voters); + while step > 0 { match next_voters(current_weight, voters, step) { // proceed with the binary search @@ -324,13 +382,14 @@ impl Pallet { voters -= 1; } + let final_decision = voters.min(size.voters); debug_assert!( - weight_with(voters.min(size.voters)) <= max_weight, + weight_with(final_decision) <= max_weight, "weight_with({}) <= {}", - voters.min(size.voters), + final_decision, max_weight, ); - voters.min(size.voters) + final_decision } /// Checks if an execution of the offchain worker is permitted at the given block number, or @@ -498,6 +557,7 @@ mod tests { Call, *, }; use frame_support::{dispatch::Dispatchable, traits::OffchainWorker}; + use helpers::voter_index_fn_linear; use mock::Call as OuterCall; use frame_election_provider_support::Assignment; use sp_runtime::{traits::ValidateUnsigned, PerU16}; @@ -642,7 +702,7 @@ mod tests { #[test] #[should_panic(expected = "Invalid unsigned submission must produce invalid block and \ deprive validator from their authoring reward.: \ - DispatchError::Module { index: 2, error: 1, message: \ + Module { index: 2, error: 1, message: \ 
Some(\"PreDispatchWrongWinnerCount\") }")] fn unfeasible_solution_panics() { ExtBuilder::default().build_and_execute(|| { @@ -824,20 +884,20 @@ mod tests { assert!(MultiPhase::try_acquire_offchain_lock(25).is_ok()); // next block: rejected. - assert!(MultiPhase::try_acquire_offchain_lock(26).is_err()); + assert_noop!(MultiPhase::try_acquire_offchain_lock(26), "recently executed."); // allowed after `OFFCHAIN_REPEAT` assert!(MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT).into()).is_ok()); // a fork like situation: re-execute last 3. - assert!( - MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 3).into()).is_err() + assert_noop!( + MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 3).into()), "fork." ); - assert!( - MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 2).into()).is_err() + assert_noop!( + MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 2).into()), "fork." ); - assert!( - MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 1).into()).is_err() + assert_noop!( + MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 1).into()), "fork." ); }) } @@ -881,4 +941,116 @@ mod tests { assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned(_, _)))); }) } + + #[test] + fn trim_compact_length_does_not_modify_when_short_enough() { + let mut ext = ExtBuilder::default().build(); + ext.execute_with(|| { + roll_to(25); + + // given + let RoundSnapshot { voters, ..} = MultiPhase::snapshot().unwrap(); + let RawSolution { mut compact, .. 
} = raw_solution(); + let encoded_len = compact.encode().len() as u32; + let compact_clone = compact.clone(); + + // when + assert!(encoded_len < ::MinerMaxLength::get()); + + // then + compact = MultiPhase::trim_compact_length( + encoded_len, + compact, + voter_index_fn_linear::(&voters), + ).unwrap(); + assert_eq!(compact, compact_clone); + }); + } + + #[test] + fn trim_compact_length_modifies_when_too_long() { + let mut ext = ExtBuilder::default().build(); + ext.execute_with(|| { + roll_to(25); + + let RoundSnapshot { voters, ..} = + MultiPhase::snapshot().unwrap(); + + let RawSolution { mut compact, .. } = raw_solution(); + let encoded_len = compact.encoded_size() as u32; + let compact_clone = compact.clone(); + + compact = MultiPhase::trim_compact_length( + encoded_len - 1, + compact, + voter_index_fn_linear::(&voters), + ).unwrap(); + + assert_ne!(compact, compact_clone); + assert!((compact.encoded_size() as u32) < encoded_len); + }); + } + + #[test] + fn trim_compact_length_trims_lowest_stake() { + let mut ext = ExtBuilder::default().build(); + ext.execute_with(|| { + roll_to(25); + + let RoundSnapshot { voters, ..} = + MultiPhase::snapshot().unwrap(); + + let RawSolution { mut compact, .. 
} = raw_solution(); + let encoded_len = compact.encoded_size() as u32; + let voter_count = compact.voter_count(); + let min_stake_voter = voters.iter() + .map(|(id, weight, _)| (weight, id)) + .min() + .map(|(_, id)| id) + .unwrap(); + + + compact = MultiPhase::trim_compact_length( + encoded_len - 1, + compact, + voter_index_fn_linear::(&voters), + ).unwrap(); + + assert_eq!(compact.voter_count(), voter_count - 1, "we must have removed exactly 1 voter"); + + let assignments = compact.into_assignment( + |voter| Some(voter as AccountId), + |target| Some(target as AccountId), + ).unwrap(); + assert!( + assignments.iter() + .all(|Assignment{ who, ..}| who != min_stake_voter), + "min_stake_voter must no longer be in the set of voters", + ); + }); + } + + // all the other solution-generation functions end up delegating to `mine_solution`, so if we + // demonstrate that `mine_solution` solutions are all trimmed to an acceptable length, then + // we know that higher-level functions will all also have short-enough solutions. + #[test] + fn mine_solution_solutions_always_within_acceptable_length() { + let mut ext = ExtBuilder::default().build(); + ext.execute_with(|| { + roll_to(25); + + // how long would the default solution be? 
+ let solution = MultiPhase::mine_solution(0).unwrap(); + let max_length = ::MinerMaxLength::get(); + let solution_size = solution.0.compact.encoded_size(); + assert!(solution_size <= max_length as usize); + + // now set the max size to less than the actual size and regenerate + ::MinerMaxLength::set(solution_size as u32 - 1); + let solution = MultiPhase::mine_solution(0).unwrap(); + let max_length = ::MinerMaxLength::get(); + let solution_size = solution.0.compact.encoded_size(); + assert!(solution_size <= max_length as usize); + }); + } } diff --git a/frame/elections-phragmen/CHANGELOG.md b/frame/elections-phragmen/CHANGELOG.md index 3d48448fa55ec..231de1d2e475e 100644 --- a/frame/elections-phragmen/CHANGELOG.md +++ b/frame/elections-phragmen/CHANGELOG.md @@ -4,7 +4,18 @@ All notable changes to this crate will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this crate adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
-## [3.0.0] - UNRELEASED +## [4.0.0] - UNRELEASED + +### Added + +### Changed +\[**Needs Migration**\] [migrate pallet-elections-phragmen to attribute macros](https://github.com/paritytech/substrate/pull/8044) + +### Fixed + +### Security + +## [3.0.0] ### Added [Add slashing events to elections-phragmen](https://github.com/paritytech/substrate/pull/7543) diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 89723cb85fbe1..aa2b564f73f24 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections-phragmen" -version = "3.0.0" +version = "4.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -14,17 +14,17 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } +sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-core = { version = "3.0.0", default-features = false, path = "../../primitives/core" } frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } log = { version = "0.4.14", default-features = false } [dev-dependencies] -sp-io = { version = "3.0.0", path = "../../primitives/io" } hex-literal = "0.3.1" pallet-balances = { version = "3.0.0", path = "../balances" } sp-core = { version = "3.0.0", path = 
"../../primitives/core" } @@ -33,13 +33,14 @@ substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } [features] default = ["std"] std = [ - "serde", "codec/std", "frame-support/std", "sp-runtime/std", "sp-npos-elections/std", "frame-system/std", "sp-std/std", + "sp-io/std", + "sp-core/std", "log/std", ] runtime-benchmarks = [ diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index cfdcd80207958..3534a62ac3ce0 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -22,10 +22,10 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; -use frame_support::traits::OnInitialize; +use frame_benchmarking::{benchmarks, account, whitelist, impl_benchmark_test_suite}; +use frame_support::{traits::OnInitialize, dispatch::DispatchResultWithPostInfo}; -use crate::Module as Elections; +use crate::Pallet as Elections; const BALANCE_FACTOR: u32 = 250; const MAX_VOTERS: u32 = 500; @@ -33,14 +33,6 @@ const MAX_CANDIDATES: u32 = 200; type Lookup = <::Lookup as StaticLookup>::Source; -macro_rules! whitelist { - ($acc:ident) => { - frame_benchmarking::benchmarking::add_to_whitelist( - frame_system::Account::::hashed_key_for(&$acc).into() - ); - }; -} - /// grab new account with infinite balance. fn endowed_account(name: &'static str, index: u32) -> T::AccountId { let account: T::AccountId = account(name, index, 0); @@ -95,11 +87,12 @@ fn submit_candidates_with_self_vote(c: u32, prefix: &'static str) Ok(candidates) } - /// Submit one voter. 
-fn submit_voter(caller: T::AccountId, votes: Vec, stake: BalanceOf) - -> frame_support::dispatch::DispatchResult -{ +fn submit_voter( + caller: T::AccountId, + votes: Vec, + stake: BalanceOf, +) -> DispatchResultWithPostInfo { >::vote(RawOrigin::Signed(caller).into(), votes, stake) } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 5031cb57e6428..dafcc3dd5910e 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -100,18 +100,14 @@ use codec::{Decode, Encode}; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}, - ensure, - storage::{IterableStorageMap, StorageMap}, + dispatch::{WithPostDispatchInfo}, traits::{ ChangeMembers, Contains, ContainsLengthBound, Currency, CurrencyToVote, Get, InitializeMembers, LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, - WithdrawReasons, + WithdrawReasons, SortedMembers, }, weights::Weight, }; -use frame_system::{ensure_root, ensure_signed}; use sp_npos_elections::{ElectionResult, ExtendedBalance}; use sp_runtime::{ traits::{Saturating, StaticLookup, Zero}, @@ -123,7 +119,8 @@ mod benchmarking; pub mod weights; pub use weights::WeightInfo; -pub mod migrations_3_0_0; +/// All migrations. +pub mod migrations; /// The maximum votes allowed per voter. pub const MAXIMUM_VOTE: usize = 16; @@ -170,213 +167,97 @@ pub struct SeatHolder { pub deposit: Balance, } -pub trait Config: frame_system::Config { - /// The overarching event type.c - type Event: From> + Into<::Event>; +pub use pallet::*; - /// Identifier for the elections-phragmen pallet's lock - type ModuleId: Get; - - /// The currency that people are electing with. - type Currency: - LockableCurrency + - ReservableCurrency; - - /// What to do when the members change. 
- type ChangeMembers: ChangeMembers; - - /// What to do with genesis members - type InitializeMembers: InitializeMembers; - - /// Convert a balance into a number used for election calculation. - /// This must fit into a `u64` but is allowed to be sensibly lossy. - type CurrencyToVote: CurrencyToVote>; - - /// How much should be locked up in order to submit one's candidacy. - type CandidacyBond: Get>; - - /// Base deposit associated with voting. - /// - /// This should be sensibly high to economically ensure the pallet cannot be attacked by - /// creating a gigantic number of votes. - type VotingBondBase: Get>; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// The amount of bond that need to be locked for each vote (32 bytes). - type VotingBondFactor: Get>; + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From> + + IsType<::Event>; - /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) - type LoserCandidate: OnUnbalanced>; + /// Identifier for the elections-phragmen pallet's lock + #[pallet::constant] + type PalletId: Get; - /// Handler for the unbalanced reduction when a member has been kicked. - type KickedMember: OnUnbalanced>; + /// The currency that people are electing with. + type Currency: LockableCurrency + + ReservableCurrency; - /// Number of members to elect. - type DesiredMembers: Get; + /// What to do when the members change. + type ChangeMembers: ChangeMembers; - /// Number of runners_up to keep. - type DesiredRunnersUp: Get; + /// What to do with genesis members + type InitializeMembers: InitializeMembers; - /// How long each seat is kept. This defines the next block number at which an election - /// round will happen. If set to zero, no elections are ever triggered and the module will - /// be in passive mode. 
- type TermDuration: Get; + /// Convert a balance into a number used for election calculation. + /// This must fit into a `u64` but is allowed to be sensibly lossy. + type CurrencyToVote: CurrencyToVote>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// How much should be locked up in order to submit one's candidacy. + #[pallet::constant] + type CandidacyBond: Get>; -decl_storage! { - trait Store for Module as PhragmenElection { - /// The current elected members. + /// Base deposit associated with voting. /// - /// Invariant: Always sorted based on account id. - pub Members get(fn members): Vec>>; + /// This should be sensibly high to economically ensure the pallet cannot be attacked by + /// creating a gigantic number of votes. + #[pallet::constant] + type VotingBondBase: Get>; - /// The current reserved runners-up. - /// - /// Invariant: Always sorted based on rank (worse to best). Upon removal of a member, the - /// last (i.e. _best_) runner-up will be replaced. - pub RunnersUp get(fn runners_up): Vec>>; + /// The amount of bond that need to be locked for each vote (32 bytes). + #[pallet::constant] + type VotingBondFactor: Get>; - /// The present candidate list. A current member or runner-up can never enter this vector - /// and is always implicitly assumed to be a candidate. - /// - /// Second element is the deposit. - /// - /// Invariant: Always sorted based on account id. - pub Candidates get(fn candidates): Vec<(T::AccountId, BalanceOf)>; + /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) + type LoserCandidate: OnUnbalanced>; - /// The total number of vote rounds that have happened, excluding the upcoming one. - pub ElectionRounds get(fn election_rounds): u32 = Zero::zero(); + /// Handler for the unbalanced reduction when a member has been kicked. + type KickedMember: OnUnbalanced>; - /// Votes and locked stake of a particular voter. 
- /// - /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. - pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => Voter>; - } add_extra_genesis { - config(members): Vec<(T::AccountId, BalanceOf)>; - build(|config: &GenesisConfig| { - assert!( - config.members.len() as u32 <= T::DesiredMembers::get(), - "Cannot accept more than DesiredMembers genesis member", - ); - let members = config.members.iter().map(|(ref member, ref stake)| { - // make sure they have enough stake. - assert!( - T::Currency::free_balance(member) >= *stake, - "Genesis member does not have enough stake.", - ); + /// Number of members to elect. + #[pallet::constant] + type DesiredMembers: Get; - // Note: all members will only vote for themselves, hence they must be given exactly - // their own stake as total backing. Any sane election should behave as such. - // Nonetheless, stakes will be updated for term 1 onwards according to the election. - Members::::mutate(|members| { - match members.binary_search_by(|m| m.who.cmp(member)) { - Ok(_) => panic!("Duplicate member in elections-phragmen genesis: {}", member), - Err(pos) => members.insert( - pos, - SeatHolder { who: member.clone(), stake: *stake, deposit: Zero::zero() }, - ), - } - }); + /// Number of runners_up to keep. + #[pallet::constant] + type DesiredRunnersUp: Get; - // set self-votes to make persistent. Genesis voters don't have any bond, nor do - // they have any lock. NOTE: this means that we will still try to remove a lock once - // this genesis voter is removed, and for now it is okay because remove_lock is noop - // if lock is not there. - >::insert( - &member, - Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, - ); + /// How long each seat is kept. This defines the next block number at which an election + /// round will happen. If set to zero, no elections are ever triggered and the module will + /// be in passive mode. 
+ #[pallet::constant] + type TermDuration: Get; - member.clone() - }).collect::>(); - - // report genesis members to upstream, if any. - T::InitializeMembers::initialize_members(&members); - }) + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; } -} -decl_error! { - pub enum Error for Module { - /// Cannot vote when no candidates or members exist. - UnableToVote, - /// Must vote for at least one candidate. - NoVotes, - /// Cannot vote more than candidates. - TooManyVotes, - /// Cannot vote more than maximum allowed. - MaximumVotesExceeded, - /// Cannot vote with stake less than minimum balance. - LowBalance, - /// Voter can not pay voting bond. - UnableToPayBond, - /// Must be a voter. - MustBeVoter, - /// Cannot report self. - ReportSelf, - /// Duplicated candidate submission. - DuplicatedCandidate, - /// Member cannot re-submit candidacy. - MemberSubmit, - /// Runner cannot re-submit candidacy. - RunnerUpSubmit, - /// Candidate does not have enough funds. - InsufficientCandidateFunds, - /// Not a member. - NotMember, - /// The provided count of number of candidates is incorrect. - InvalidWitnessData, - /// The provided count of number of votes is incorrect. - InvalidVoteCount, - /// The renouncing origin presented a wrong `Renouncing` parameter. - InvalidRenouncing, - /// Prediction regarding replacement after member removal is wrong. - InvalidReplacement, - } -} + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); -decl_event!( - pub enum Event where Balance = BalanceOf, ::AccountId { - /// A new term with \[new_members\]. This indicates that enough candidates existed to run the - /// election, not that enough have has been elected. The inner value must be examined for - /// this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond slashed and - /// none were elected, whilst `EmptyTerm` means that no candidates existed to begin with. 
- NewTerm(Vec<(AccountId, Balance)>), - /// No (or not enough) candidates existed for this round. This is different from - /// `NewTerm(\[\])`. See the description of `NewTerm`. - EmptyTerm, - /// Internal error happened while trying to perform election. - ElectionError, - /// A \[member\] has been removed. This should always be followed by either `NewTerm` or - /// `EmptyTerm`. - MemberKicked(AccountId), - /// Someone has renounced their candidacy. - Renounced(AccountId), - /// A \[candidate\] was slashed by \[amount\] due to failing to obtain a seat as member or - /// runner-up. + #[pallet::hooks] + impl Hooks> for Pallet { + /// What to do at the end of each block. /// - /// Note that old members and runners-up are also candidates. - CandidateSlashed(AccountId, Balance), - /// A \[seat holder\] was slashed by \[amount\] by being forcefully removed from the set. - SeatHolderSlashed(AccountId, Balance), + /// Checks if an election needs to happen or not. + fn on_initialize(n: T::BlockNumber) -> Weight { + let term_duration = T::TermDuration::get(); + if !term_duration.is_zero() && (n % term_duration).is_zero() { + Self::do_phragmen() + } else { + 0 + } + } } -); - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - fn deposit_event() = default; - - const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - const VotingBondBase: BalanceOf = T::VotingBondBase::get(); - const VotingBondFactor: BalanceOf = T::VotingBondFactor::get(); - const DesiredMembers: u32 = T::DesiredMembers::get(); - const DesiredRunnersUp: u32 = T::DesiredRunnersUp::get(); - const TermDuration: T::BlockNumber = T::TermDuration::get(); - const ModuleId: LockIdentifier = T::ModuleId::get(); + #[pallet::call] + impl Pallet { /// Vote for a set of candidates for the upcoming round of election. This can be called to /// set the initial votes, or update already existing votes. /// @@ -400,16 +281,16 @@ decl_module! 
{ /// # /// We assume the maximum weight among all 3 cases: vote_equal, vote_more and vote_less. /// # - #[weight = + #[pallet::weight( T::WeightInfo::vote_more(votes.len() as u32) .max(T::WeightInfo::vote_less(votes.len() as u32)) .max(T::WeightInfo::vote_equal(votes.len() as u32)) - ] - fn vote( - origin, + )] + pub(crate) fn vote( + origin: OriginFor, votes: Vec, - #[compact] value: BalanceOf, - ) { + #[pallet::compact] value: BalanceOf, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; // votes should not be empty and more than `MAXIMUM_VOTE` in any case. @@ -423,9 +304,8 @@ decl_module! { // can never submit a vote of there are no members, and cannot submit more votes than // all potential vote targets. // addition is valid: candidates, members and runners-up will never overlap. - let allowed_votes = candidates_count - .saturating_add(members_count) - .saturating_add(runners_up_count); + let allowed_votes = + candidates_count.saturating_add(members_count).saturating_add(runners_up_count); ensure!(!allowed_votes.is_zero(), Error::::UnableToVote); ensure!(votes.len() <= allowed_votes, Error::::TooManyVotes); @@ -438,27 +318,29 @@ decl_module! { Ordering::Greater => { // Must reserve a bit more. let to_reserve = new_deposit - old_deposit; - T::Currency::reserve(&who, to_reserve).map_err(|_| Error::::UnableToPayBond)?; - }, - Ordering::Equal => {}, + T::Currency::reserve(&who, to_reserve) + .map_err(|_| Error::::UnableToPayBond)?; + } + Ordering::Equal => {} Ordering::Less => { // Must unreserve a bit. let to_unreserve = old_deposit - new_deposit; let _remainder = T::Currency::unreserve(&who, to_unreserve); debug_assert!(_remainder.is_zero()); - }, + } }; // Amount to be locked up. 
let locked_stake = value.min(T::Currency::total_balance(&who)); T::Currency::set_lock( - T::ModuleId::get(), + T::PalletId::get(), &who, locked_stake, WithdrawReasons::all(), ); Voting::::insert(&who, Voter { votes, deposit: new_deposit, stake: locked_stake }); + Ok(None.into()) } /// Remove `origin` as a voter. @@ -466,11 +348,12 @@ decl_module! { /// This removes the lock and returns the deposit. /// /// The dispatch origin of this call must be signed and be a voter. - #[weight = T::WeightInfo::remove_voter()] - fn remove_voter(origin) { + #[pallet::weight(T::WeightInfo::remove_voter())] + pub(crate) fn remove_voter(origin: OriginFor) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; ensure!(Self::is_voter(&who), Error::::MustBeVoter); Self::do_remove_voter(&who); + Ok(None.into()) } /// Submit oneself for candidacy. A fixed amount of deposit is recorded. @@ -488,15 +371,15 @@ decl_module! { /// # /// The number of current candidates must be provided as witness data. /// # - #[weight = T::WeightInfo::submit_candidacy(*candidate_count)] - fn submit_candidacy(origin, #[compact] candidate_count: u32) { + #[pallet::weight(T::WeightInfo::submit_candidacy(*candidate_count))] + pub(crate) fn submit_candidacy( + origin: OriginFor, + #[pallet::compact] candidate_count: u32, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; let actual_count = >::decode_len().unwrap_or(0); - ensure!( - actual_count as u32 <= candidate_count, - Error::::InvalidWitnessData, - ); + ensure!(actual_count as u32 <= candidate_count, Error::::InvalidWitnessData,); let index = Self::is_candidate(&who).err().ok_or(Error::::DuplicatedCandidate)?; @@ -507,6 +390,7 @@ decl_module! { .map_err(|_| Error::::InsufficientCandidateFunds)?; >::mutate(|c| c.insert(index, (who, T::CandidacyBond::get()))); + Ok(None.into()) } /// Renounce one's intention to be a candidate for the next election round. 3 potential @@ -518,27 +402,30 @@ decl_module! 
{ /// origin is removed as a runner-up. /// - `origin` is a current member. In this case, the deposit is unreserved and origin is /// removed as a member, consequently not being a candidate for the next round anymore. - /// Similar to [`remove_members`], if replacement runners exists, they are immediately used. - /// If the prime is renouncing, then no prime will exist until the next round. + /// Similar to [`remove_members`], if replacement runners exists, they are immediately + /// used. If the prime is renouncing, then no prime will exist until the next round. /// /// The dispatch origin of this call must be signed, and have one of the above roles. /// /// # /// The type of renouncing must be provided as witness data. /// # - #[weight = match *renouncing { + #[pallet::weight(match *renouncing { Renouncing::Candidate(count) => T::WeightInfo::renounce_candidacy_candidate(count), Renouncing::Member => T::WeightInfo::renounce_candidacy_members(), Renouncing::RunnerUp => T::WeightInfo::renounce_candidacy_runners_up(), - }] - fn renounce_candidacy(origin, renouncing: Renouncing) { + })] + pub(crate) fn renounce_candidacy( + origin: OriginFor, + renouncing: Renouncing, + ) -> DispatchResultWithPostInfo { let who = ensure_signed(origin)?; match renouncing { Renouncing::Member => { let _ = Self::remove_and_replace_member(&who, false) .map_err(|_| Error::::InvalidRenouncing)?; - Self::deposit_event(RawEvent::Renounced(who)); - }, + Self::deposit_event(Event::Renounced(who)); + } Renouncing::RunnerUp => { >::try_mutate::<_, Error, _>(|runners_up| { let index = runners_up @@ -549,7 +436,7 @@ decl_module! { let SeatHolder { deposit, .. } = runners_up.remove(index); let _remainder = T::Currency::unreserve(&who, deposit); debug_assert!(_remainder.is_zero()); - Self::deposit_event(RawEvent::Renounced(who)); + Self::deposit_event(Event::Renounced(who)); Ok(()) })?; } @@ -562,11 +449,12 @@ decl_module! 
{ let (_removed, deposit) = candidates.remove(index); let _remainder = T::Currency::unreserve(&who, deposit); debug_assert!(_remainder.is_zero()); - Self::deposit_event(RawEvent::Renounced(who)); + Self::deposit_event(Event::Renounced(who)); Ok(()) })?; } }; + Ok(None.into()) } /// Remove a particular member from the set. This is effective immediately and the bond of @@ -583,13 +471,13 @@ decl_module! { /// If we have a replacement, we use a small weight. Else, since this is a root call and /// will go into phragmen, we assume full block for now. /// # - #[weight = if *has_replacement { + #[pallet::weight(if *has_replacement { T::WeightInfo::remove_member_with_replacement() } else { T::BlockWeights::get().max_block - }] - fn remove_member( - origin, + })] + pub(crate) fn remove_member( + origin: OriginFor, who: ::Source, has_replacement: bool, ) -> DispatchResultWithPostInfo { @@ -601,13 +489,13 @@ decl_module! { // In both cases, we will change more weight than need. Refund and abort. return Err(Error::::InvalidReplacement.with_weight( // refund. The weight value comes from a benchmark which is special to this. - T::WeightInfo::remove_member_wrong_refund() + T::WeightInfo::remove_member_wrong_refund(), )); } let had_replacement = Self::remove_and_replace_member(&who, true)?; debug_assert_eq!(has_replacement, had_replacement); - Self::deposit_event(RawEvent::MemberKicked(who.clone())); + Self::deposit_event(Event::MemberKicked(who.clone())); if !had_replacement { Self::do_phragmen(); @@ -627,36 +515,197 @@ decl_module! { /// # /// The total number of voters and those that are defunct must be provided as witness data. 
/// # - #[weight = T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct)] - fn clean_defunct_voters(origin, _num_voters: u32, _num_defunct: u32) { + #[pallet::weight(T::WeightInfo::clean_defunct_voters(*_num_voters, *_num_defunct))] + pub(crate) fn clean_defunct_voters( + origin: OriginFor, + _num_voters: u32, + _num_defunct: u32, + ) -> DispatchResultWithPostInfo { let _ = ensure_root(origin)?; >::iter() .filter(|(_, x)| Self::is_defunct_voter(&x.votes)) - .for_each(|(dv, _)| { - Self::do_remove_voter(&dv) - }) + .for_each(|(dv, _)| Self::do_remove_voter(&dv)); + + Ok(None.into()) } + } - /// What to do at the end of each block. + #[pallet::event] + #[pallet::metadata( + ::AccountId = "AccountId", + BalanceOf = "Balance", + Vec<(::AccountId, BalanceOf)> = "Vec<(AccountId, Balance)>", + )] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A new term with \[new_members\]. This indicates that enough candidates existed to run + /// the election, not that enough have has been elected. The inner value must be examined + /// for this purpose. A `NewTerm(\[\])` indicates that some candidates got their bond + /// slashed and none were elected, whilst `EmptyTerm` means that no candidates existed to + /// begin with. + NewTerm(Vec<(::AccountId, BalanceOf)>), + /// No (or not enough) candidates existed for this round. This is different from + /// `NewTerm(\[\])`. See the description of `NewTerm`. + EmptyTerm, + /// Internal error happened while trying to perform election. + ElectionError, + /// A \[member\] has been removed. This should always be followed by either `NewTerm` or + /// `EmptyTerm`. + MemberKicked(::AccountId), + /// Someone has renounced their candidacy. + Renounced(::AccountId), + /// A \[candidate\] was slashed by \[amount\] due to failing to obtain a seat as member or + /// runner-up. /// - /// Checks if an election needs to happen or not. 
- fn on_initialize(n: T::BlockNumber) -> Weight { - let term_duration = T::TermDuration::get(); - if !term_duration.is_zero() && (n % term_duration).is_zero() { - Self::do_phragmen() - } else { - 0 - } + /// Note that old members and runners-up are also candidates. + CandidateSlashed(::AccountId, BalanceOf), + /// A \[seat holder\] was slashed by \[amount\] by being forcefully removed from the set. + SeatHolderSlashed(::AccountId, BalanceOf), + } + + #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + #[pallet::error] + pub enum Error { + /// Cannot vote when no candidates or members exist. + UnableToVote, + /// Must vote for at least one candidate. + NoVotes, + /// Cannot vote more than candidates. + TooManyVotes, + /// Cannot vote more than maximum allowed. + MaximumVotesExceeded, + /// Cannot vote with stake less than minimum balance. + LowBalance, + /// Voter can not pay voting bond. + UnableToPayBond, + /// Must be a voter. + MustBeVoter, + /// Cannot report self. + ReportSelf, + /// Duplicated candidate submission. + DuplicatedCandidate, + /// Member cannot re-submit candidacy. + MemberSubmit, + /// Runner cannot re-submit candidacy. + RunnerUpSubmit, + /// Candidate does not have enough funds. + InsufficientCandidateFunds, + /// Not a member. + NotMember, + /// The provided count of number of candidates is incorrect. + InvalidWitnessData, + /// The provided count of number of votes is incorrect. + InvalidVoteCount, + /// The renouncing origin presented a wrong `Renouncing` parameter. + InvalidRenouncing, + /// Prediction regarding replacement after member removal is wrong. + InvalidReplacement, + } + + /// The current elected members. + /// + /// Invariant: Always sorted based on account id. + #[pallet::storage] + #[pallet::getter(fn members)] + pub type Members = + StorageValue<_, Vec>>, ValueQuery>; + + /// The current reserved runners-up. + /// + /// Invariant: Always sorted based on rank (worse to best). 
Upon removal of a member, the + /// last (i.e. _best_) runner-up will be replaced. + #[pallet::storage] + #[pallet::getter(fn runners_up)] + pub type RunnersUp = + StorageValue<_, Vec>>, ValueQuery>; + + /// The present candidate list. A current member or runner-up can never enter this vector + /// and is always implicitly assumed to be a candidate. + /// + /// Second element is the deposit. + /// + /// Invariant: Always sorted based on account id. + #[pallet::storage] + #[pallet::getter(fn candidates)] + pub type Candidates = StorageValue<_, Vec<(T::AccountId, BalanceOf)>, ValueQuery>; + + /// The total number of vote rounds that have happened, excluding the upcoming one. + #[pallet::storage] + #[pallet::getter(fn election_rounds)] + pub type ElectionRounds = StorageValue<_, u32, ValueQuery>; + + /// Votes and locked stake of a particular voter. + /// + /// TWOX-NOTE: SAFE as `AccountId` is a crypto hash. + #[pallet::storage] + #[pallet::getter(fn voting)] + pub type Voting = + StorageMap<_, Twox64Concat, T::AccountId, Voter>, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub members: Vec<(T::AccountId, BalanceOf)>, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { members: Default::default() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + assert!( + self.members.len() as u32 <= T::DesiredMembers::get(), + "Cannot accept more than DesiredMembers genesis member", + ); + let members = self.members.iter().map(|(ref member, ref stake)| { + // make sure they have enough stake. + assert!( + T::Currency::free_balance(member) >= *stake, + "Genesis member does not have enough stake.", + ); + + // Note: all members will only vote for themselves, hence they must be given exactly + // their own stake as total backing. Any sane election should behave as such. + // Nonetheless, stakes will be updated for term 1 onwards according to the election. 
+ Members::::mutate(|members| { + match members.binary_search_by(|m| m.who.cmp(member)) { + Ok(_) => panic!("Duplicate member in elections-phragmen genesis: {}", member), + Err(pos) => members.insert( + pos, + SeatHolder { who: member.clone(), stake: *stake, deposit: Zero::zero() }, + ), + } + }); + + // set self-votes to make persistent. Genesis voters don't have any bond, nor do + // they have any lock. NOTE: this means that we will still try to remove a lock once + // this genesis voter is removed, and for now it is okay because remove_lock is noop + // if lock is not there. + >::insert( + &member, + Voter { votes: vec![member.clone()], stake: *stake, deposit: Zero::zero() }, + ); + + member.clone() + }).collect::>(); + + // report genesis members to upstream, if any. + T::InitializeMembers::initialize_members(&members); } } } -impl Module { +impl Pallet { /// The deposit value of `count` votes. fn deposit_of(count: usize) -> BalanceOf { - T::VotingBondBase::get().saturating_add( - T::VotingBondFactor::get().saturating_mul((count as u32).into()) - ) + T::VotingBondBase::get() + .saturating_add(T::VotingBondFactor::get().saturating_mul((count as u32).into())) } /// Attempts to remove a member `who`. If a runner-up exists, it is used as the replacement. @@ -691,7 +740,7 @@ impl Module { let (imbalance, _remainder) = T::Currency::slash_reserved(who, removed.deposit); debug_assert!(_remainder.is_zero()); T::LoserCandidate::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::SeatHolderSlashed(who.clone(), removed.deposit)); + Self::deposit_event(Event::SeatHolderSlashed(who.clone(), removed.deposit)); } else { T::Currency::unreserve(who, removed.deposit); } @@ -807,7 +856,7 @@ impl Module { let Voter { deposit, .. } = >::take(who); // remove storage, lock and unreserve. 
- T::Currency::remove_lock(T::ModuleId::get(), who); + T::Currency::remove_lock(T::PalletId::get(), who); // NOTE: we could check the deposit amount before removing and skip if zero, but it will be // a noop anyhow. @@ -829,7 +878,7 @@ impl Module { candidates_and_deposit.append(&mut Self::implicit_candidates_with_deposit()); if candidates_and_deposit.len().is_zero() { - Self::deposit_event(RawEvent::EmptyTerm); + Self::deposit_event(Event::EmptyTerm); return T::DbWeight::get().reads(5); } @@ -956,7 +1005,7 @@ impl Module { { let (imbalance, _) = T::Currency::slash_reserved(c, *d); T::LoserCandidate::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::CandidateSlashed(c.clone(), *d)); + Self::deposit_event(Event::CandidateSlashed(c.clone(), *d)); } }); @@ -996,22 +1045,28 @@ impl Module { // clean candidates. >::kill(); - Self::deposit_event(RawEvent::NewTerm(new_members_sorted_by_id)); - ElectionRounds::mutate(|v| *v += 1); + Self::deposit_event(Event::NewTerm(new_members_sorted_by_id)); + >::mutate(|v| *v += 1); }).map_err(|e| { log::error!( target: "runtime::elections-phragmen", "Failed to run election [{:?}].", e, ); - Self::deposit_event(RawEvent::ElectionError); + Self::deposit_event(Event::ElectionError); }); T::WeightInfo::election_phragmen(weight_candidates, weight_voters, weight_edges) } } -impl Contains for Module { +impl Contains for Pallet { + fn contains(who: &T::AccountId) -> bool { + Self::is_member(who) + } +} + +impl SortedMembers for Pallet { fn contains(who: &T::AccountId) -> bool { Self::is_member(who) } @@ -1033,8 +1088,10 @@ impl Contains for Module { } } -impl ContainsLengthBound for Module { - fn min_len() -> usize { 0 } +impl ContainsLengthBound for Pallet { + fn min_len() -> usize { + 0 + } /// Implementation uses a parameter type so calling is cost-free. 
fn max_len() -> usize { @@ -1045,15 +1102,18 @@ impl ContainsLengthBound for Module { #[cfg(test)] mod tests { use super::*; - use frame_support::{assert_ok, assert_noop, parameter_types, - traits::OnInitialize, + use frame_support::{ + assert_ok, assert_noop, parameter_types, traits::OnInitialize, + dispatch::DispatchResultWithPostInfo, }; use substrate_test_utils::assert_eq_uvec; use sp_core::H256; use sp_runtime::{ - testing::Header, BuildStorage, DispatchResult, + BuildStorage, + testing::Header, traits::{BlakeTwo256, IdentityLookup}, }; + use frame_system::ensure_signed; use crate as elections_phragmen; parameter_types! { @@ -1084,7 +1144,8 @@ mod tests { type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); - type SS58Prefix = (); + type SS58Prefix = (); + type OnSetCode = (); } parameter_types! { @@ -1157,11 +1218,11 @@ mod tests { } parameter_types! { - pub const ElectionsPhragmenModuleId: LockIdentifier = *b"phrelect"; + pub const ElectionsPhragmenPalletId: LockIdentifier = *b"phrelect"; } impl Config for Test { - type ModuleId = ElectionsPhragmenModuleId; + type PalletId = ElectionsPhragmenPalletId; type Event = Event; type Currency = Balances; type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -1308,12 +1369,11 @@ mod tests { } fn has_lock(who: &u64) -> u64 { - dbg!(Balances::locks(who)); Balances::locks(who) .get(0) .cloned() .map(|lock| { - assert_eq!(lock.id, ElectionsPhragmenModuleId::get()); + assert_eq!(lock.id, ElectionsPhragmenPalletId::get()); lock.amount }) .unwrap_or_default() @@ -1369,11 +1429,11 @@ mod tests { ensure_members_has_approval_stake(); } - fn submit_candidacy(origin: Origin) -> DispatchResult { + fn submit_candidacy(origin: Origin) -> DispatchResultWithPostInfo { Elections::submit_candidacy(origin, Elections::candidates().len() as u32) } - fn vote(origin: Origin, votes: Vec, stake: u64) -> DispatchResult { + fn vote(origin: Origin, votes: Vec, stake: u64) -> 
DispatchResultWithPostInfo { // historical note: helper function was created in a period of time in which the API of vote // call was changing. Currently it is a wrapper for the original call and does not do much. // Nonetheless, totally harmless. @@ -2074,7 +2134,7 @@ mod tests { assert_eq!( System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::EmptyTerm), + Event::elections_phragmen(super::Event::EmptyTerm), ) }) } @@ -2093,7 +2153,7 @@ mod tests { assert_eq!( System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])), + Event::elections_phragmen(super::Event::NewTerm(vec![(4, 40), (5, 50)])), ); assert_eq!(members_and_stake(), vec![(4, 40), (5, 50)]); @@ -2107,7 +2167,7 @@ mod tests { assert_eq!( System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::NewTerm(vec![])), + Event::elections_phragmen(super::Event::NewTerm(vec![])), ); // outgoing have lost their bond. @@ -2180,7 +2240,7 @@ mod tests { assert_eq!( System::events().iter().last().unwrap().event, - Event::elections_phragmen(RawEvent::NewTerm(vec![])), + Event::elections_phragmen(super::Event::NewTerm(vec![])), ) }); } @@ -2540,7 +2600,7 @@ mod tests { assert_eq!(balances(&5), (45, 2)); assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])) + event.event == Event::elections_phragmen(super::Event::NewTerm(vec![(4, 40), (5, 50)])) })); }) } diff --git a/frame/elections-phragmen/src/migrations/mod.rs b/frame/elections-phragmen/src/migrations/mod.rs new file mode 100644 index 0000000000000..9a1f86a1ad7ce --- /dev/null +++ b/frame/elections-phragmen/src/migrations/mod.rs @@ -0,0 +1,23 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! All migrations of this pallet. + +/// Version 3. +pub mod v3; +/// Version 4. +pub mod v4; diff --git a/frame/elections-phragmen/src/migrations_3_0_0.rs b/frame/elections-phragmen/src/migrations/v3.rs similarity index 79% rename from frame/elections-phragmen/src/migrations_3_0_0.rs rename to frame/elections-phragmen/src/migrations/v3.rs index 8adc4c1a69f7c..8afc9ed66920b 100644 --- a/frame/elections-phragmen/src/migrations_3_0_0.rs +++ b/frame/elections-phragmen/src/migrations/v3.rs @@ -21,7 +21,6 @@ use codec::{Encode, Decode, FullCodec}; use sp_std::prelude::*; use frame_support::{ RuntimeDebug, weights::Weight, Twox64Concat, - storage::types::{StorageMap, StorageValue}, traits::{GetPalletVersion, PalletVersion}, }; @@ -51,38 +50,21 @@ pub trait V2ToV3 { type Balance: 'static + FullCodec + Copy; } -struct __Candidates; -impl frame_support::traits::StorageInstance for __Candidates { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "Candidates"; -} - -#[allow(type_alias_bounds)] -type Candidates = StorageValue<__Candidates, Vec<(T::AccountId, T::Balance)>>; - -struct __Members; -impl frame_support::traits::StorageInstance for __Members { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "Members"; -} -#[allow(type_alias_bounds)] -type Members = StorageValue<__Members, 
Vec>>; - -struct __RunnersUp; -impl frame_support::traits::StorageInstance for __RunnersUp { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "RunnersUp"; -} -#[allow(type_alias_bounds)] -type RunnersUp = StorageValue<__RunnersUp, Vec>>; - -struct __Voting; -impl frame_support::traits::StorageInstance for __Voting { - fn pallet_prefix() -> &'static str { "PhragmenElection" } - const STORAGE_PREFIX: &'static str = "Voting"; -} -#[allow(type_alias_bounds)] -type Voting = StorageMap<__Voting, Twox64Concat, T::AccountId, Voter>; +frame_support::generate_storage_alias!( + PhragmenElection, Candidates => Value> +); +frame_support::generate_storage_alias!( + PhragmenElection, Members => Value>> +); +frame_support::generate_storage_alias!( + PhragmenElection, RunnersUp => Value>> +); +frame_support::generate_storage_alias!( + PhragmenElection, Voting => Map< + (Twox64Concat, T::AccountId), + Voter + > +); /// Apply all of the migrations from 2_0_0 to 3_0_0. /// diff --git a/frame/elections-phragmen/src/migrations/v4.rs b/frame/elections-phragmen/src/migrations/v4.rs new file mode 100644 index 0000000000000..f704b203d34cf --- /dev/null +++ b/frame/elections-phragmen/src/migrations/v4.rs @@ -0,0 +1,110 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Migrations to version [`4.0.0`], as denoted by the changelog. + +use frame_support::{ + weights::Weight, + traits::{GetPalletVersion, PalletVersion, Get}, +}; + +/// The old prefix. +pub const OLD_PREFIX: &[u8] = b"PhragmenElection"; + +/// Migrate the entire storage of this pallet to a new prefix. +/// +/// This new prefix must be the same as the one set in construct_runtime. For safety, use +/// `PalletInfo` to get it, as: +/// `::PalletInfo::name::`. +/// +/// The old storage prefix, `PhragmenElection` is hardcoded in the migration code. +pub fn migrate< + T: frame_system::Config, + P: GetPalletVersion, + N: AsRef, +>(new_pallet_name: N) -> Weight { + if new_pallet_name.as_ref().as_bytes() == OLD_PREFIX { + log::info!( + target: "runtime::elections-phragmen", + "New pallet name is equal to the old prefix. No migration needs to be done.", + ); + return 0; + } + let maybe_storage_version =