diff --git a/.gitignore b/.gitignore index c870ff8a2c7290..124358b46f5b26 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ /solana-metrics/ /solana-metrics.tar.bz2 /target/ +/test-ledger/ **/*.rs.bk .cargo diff --git a/Cargo.lock b/Cargo.lock index 45a8ac1839eb35..c7153ab761c537 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -752,6 +752,26 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen", +] + +[[package]] +name = "console_log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" +dependencies = [ + "log 0.4.14", + "web-sys", +] + [[package]] name = "const_fn" version = "0.4.8" @@ -1586,6 +1606,13 @@ dependencies = [ "tempfile", ] +[[package]] +name = "gen-syscall-list" +version = "1.9.4" +dependencies = [ + "regex", +] + [[package]] name = "generic-array" version = "0.12.4" @@ -2255,6 +2282,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "libm" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" + [[package]] name = "librocksdb-sys" version = "6.20.3" @@ -2378,9 +2411,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c748cfe47cb8da225c37595b3108bea1c198c84aaae8ea0ba76d01dda9fc803" +checksum = "469898e909a1774d844793b347135a0cd344ca2f69d082013ecb8061a2229a3a" dependencies = [ "hashbrown 0.11.2", ] @@ -2745,9 +2778,9 @@ checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" [[package]] name = "openssl-src" -version = "300.0.2+3.0.0" +version = 
"300.0.4+3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14a760a11390b1a5daf72074d4f6ff1a6e772534ae191f999f57e9ee8146d1fb" +checksum = "216e1c6b4549e24182b9d7aa268f645414888a69daf44c7b2d8118da8e7b23e7" dependencies = [ "cc", ] @@ -2768,11 +2801,12 @@ dependencies = [ [[package]] name = "opentelemetry" -version = "0.13.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91cea1dfd50064e52db033179952d18c770cbc5dfefc8eba45d619357ba3914" +checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" dependencies = [ "async-trait", + "crossbeam-channel", "futures 0.3.18", "js-sys", "lazy_static", @@ -3560,13 +3594,13 @@ dependencies = [ [[package]] name = "rbpf-cli" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 3.0.0-beta.5", "serde", "serde_json", "solana-bpf-loader-program", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-program-runtime", "solana-sdk", "solana_rbpf", @@ -3609,13 +3643,16 @@ dependencies = [ [[package]] name = "reed-solomon-erasure" -version = "4.0.2" +version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a415a013dd7c5d4221382329a5a3482566da675737494935cbbbcdec04662f9d" +checksum = "7170bac0d8306941e101df0caaa6518b10bc4232dd36c34f1cb78b8a063024db" dependencies = [ "cc", "libc", + "libm", + "parking_lot 0.11.2", "smallvec 1.7.0", + "spin 0.9.2", ] [[package]] @@ -3704,7 +3741,7 @@ dependencies = [ "cc", "libc", "once_cell", - "spin", + "spin 0.5.2", "untrusted", "web-sys", "winapi 0.3.9", @@ -4222,7 +4259,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.9.0" +version = "1.9.4" dependencies = [ "Inflector", "base64 0.12.3", @@ -4243,12 +4280,12 @@ dependencies = [ [[package]] name = "solana-accounts-bench" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "log 0.4.14", "rayon", - "solana-logger 1.9.0", + "solana-logger 1.9.4", 
"solana-measure", "solana-runtime", "solana-sdk", @@ -4257,7 +4294,7 @@ dependencies = [ [[package]] name = "solana-accounts-cluster-bench" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "log 0.4.14", @@ -4270,12 +4307,13 @@ dependencies = [ "solana-faucet", "solana-gossip", "solana-local-cluster", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-net-utils", "solana-runtime", "solana-sdk", "solana-streamer", + "solana-test-validator", "solana-transaction-status", "solana-version", "spl-token", @@ -4283,7 +4321,7 @@ dependencies = [ [[package]] name = "solana-accountsdb-plugin-interface" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log 0.4.14", "solana-sdk", @@ -4293,7 +4331,7 @@ dependencies = [ [[package]] name = "solana-accountsdb-plugin-manager" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "crossbeam-channel", @@ -4303,7 +4341,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-accountsdb-plugin-interface", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-rpc", @@ -4315,7 +4353,7 @@ dependencies = [ [[package]] name = "solana-accountsdb-plugin-postgres" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "chrono", @@ -4328,7 +4366,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-accountsdb-plugin-interface", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-runtime", @@ -4338,9 +4376,38 @@ dependencies = [ "tokio-postgres", ] +[[package]] +name = "solana-address-lookup-table-program" +version = "1.9.4" +dependencies = [ + "bincode", + "bytemuck", + "log 0.4.14", + "num-derive", + "num-traits", + "rustc_version 0.4.0", + "serde", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-program-runtime", + "solana-sdk", + "thiserror", +] + +[[package]] +name = "solana-address-lookup-table-program-tests" +version = "1.9.4" 
+dependencies = [ + "assert_matches", + "bincode", + "solana-address-lookup-table-program", + "solana-program-test", + "solana-sdk", +] + [[package]] name = "solana-banking-bench" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "crossbeam-channel", @@ -4350,7 +4417,7 @@ dependencies = [ "solana-core", "solana-gossip", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-perf", "solana-poh", @@ -4362,23 +4429,24 @@ dependencies = [ [[package]] name = "solana-banks-client" -version = "1.9.0" +version = "1.9.4" dependencies = [ "borsh", "futures 0.3.18", "solana-banks-interface", "solana-banks-server", - "solana-program 1.9.0", + "solana-program 1.9.4", "solana-runtime", "solana-sdk", "tarpc", + "thiserror", "tokio", "tokio-serde", ] [[package]] name = "solana-banks-interface" -version = "1.9.0" +version = "1.9.4" dependencies = [ "serde", "solana-sdk", @@ -4387,7 +4455,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "futures 0.3.18", @@ -4403,11 +4471,11 @@ dependencies = [ [[package]] name = "solana-bench-streamer" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "solana-clap-utils", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-net-utils", "solana-streamer", "solana-version", @@ -4415,7 +4483,7 @@ dependencies = [ [[package]] name = "solana-bench-tps" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "log 0.4.14", @@ -4429,7 +4497,7 @@ dependencies = [ "solana-genesis", "solana-gossip", "solana-local-cluster", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-net-utils", @@ -4441,14 +4509,13 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "byteorder", "libsecp256k1 0.6.0", "log 0.4.14", "rand 0.7.3", - "regex", "solana-measure", 
"solana-program-runtime", "solana-runtime", @@ -4459,14 +4526,14 @@ dependencies = [ [[package]] name = "solana-bucket-map" -version = "1.9.0" +version = "1.9.4" dependencies = [ "fs_extra", "log 0.4.14", "memmap2 0.5.0", "rand 0.7.3", "rayon", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-sdk", "tempfile", @@ -4474,7 +4541,7 @@ dependencies = [ [[package]] name = "solana-cargo-build-bpf" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bzip2", "cargo_metadata", @@ -4488,7 +4555,7 @@ dependencies = [ [[package]] name = "solana-cargo-test-bpf" -version = "1.9.0" +version = "1.9.4" dependencies = [ "cargo_metadata", "clap 2.33.3", @@ -4496,7 +4563,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.9.0" +version = "1.9.4" dependencies = [ "chrono", "clap 2.33.3", @@ -4513,7 +4580,7 @@ dependencies = [ [[package]] name = "solana-cli" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "bs58 0.4.0", @@ -4539,7 +4606,7 @@ dependencies = [ "solana-client", "solana-config-program", "solana-faucet", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-program-runtime", "solana-remote-wallet", "solana-sdk", @@ -4557,7 +4624,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.9.0" +version = "1.9.4" dependencies = [ "anyhow", "dirs-next", @@ -4570,7 +4637,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.9.0" +version = "1.9.4" dependencies = [ "Inflector", "base64 0.13.0", @@ -4592,7 +4659,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_matches", "base64 0.13.0", @@ -4612,7 +4679,7 @@ dependencies = [ "solana-account-decoder", "solana-clap-utils", "solana-faucet", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-net-utils", "solana-sdk", @@ -4627,12 +4694,13 @@ dependencies = [ [[package]] name = "solana-client-test" -version = "1.9.0" +version = 
"1.9.4" dependencies = [ "serde_json", "serial_test", "solana-client", - "solana-logger 1.9.0", + "solana-ledger", + "solana-logger 1.9.4", "solana-measure", "solana-merkle-tree", "solana-metrics", @@ -4643,13 +4711,14 @@ dependencies = [ "solana-sdk", "solana-streamer", "solana-test-validator", + "solana-transaction-status", "solana-version", "systemstat", ] [[package]] name = "solana-compute-budget-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -4657,20 +4726,20 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "chrono", "serde", "serde_derive", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-program-runtime", "solana-sdk", ] [[package]] name = "solana-core" -version = "1.9.0" +version = "1.9.4" dependencies = [ "ahash 0.7.6", "base64 0.12.3", @@ -4704,11 +4773,11 @@ dependencies = [ "solana-accountsdb-plugin-manager", "solana-client", "solana-entry", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", "solana-gossip", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-net-utils", @@ -4737,7 +4806,7 @@ dependencies = [ [[package]] name = "solana-dos" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "clap 2.33.3", @@ -4746,7 +4815,7 @@ dependencies = [ "solana-client", "solana-core", "solana-gossip", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-net-utils", "solana-perf", "solana-sdk", @@ -4756,7 +4825,7 @@ dependencies = [ [[package]] name = "solana-download-utils" -version = "1.9.0" +version = "1.9.4" dependencies = [ "console", "indicatif", @@ -4768,7 +4837,7 @@ dependencies = [ [[package]] name = "solana-entry" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "dlopen", @@ -4778,7 +4847,7 @@ dependencies = [ "rand 0.7.3", "rayon", 
"serde", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-merkle-tree", "solana-metrics", @@ -4789,7 +4858,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "byteorder", @@ -4799,7 +4868,7 @@ dependencies = [ "serde_derive", "solana-clap-utils", "solana-cli-config", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-sdk", "solana-version", @@ -4830,7 +4899,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "bv", @@ -4841,8 +4910,8 @@ dependencies = [ "serde", "serde_derive", "sha2", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "thiserror", ] @@ -4860,7 +4929,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.9.0" +version = "1.9.4" dependencies = [ "proc-macro2 1.0.32", "quote 1.0.10", @@ -4870,7 +4939,7 @@ dependencies = [ [[package]] name = "solana-genesis" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.12.3", "clap 2.33.3", @@ -4881,7 +4950,7 @@ dependencies = [ "solana-cli-config", "solana-entry", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-runtime", "solana-sdk", "solana-stake-program", @@ -4892,7 +4961,7 @@ dependencies = [ [[package]] name = "solana-genesis-utils" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-download-utils", "solana-runtime", @@ -4901,7 +4970,7 @@ dependencies = [ [[package]] name = "solana-gossip" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "bv", @@ -4925,10 +4994,10 @@ dependencies = [ "solana-clap-utils", "solana-client", "solana-entry", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", 
"solana-metrics", "solana-net-utils", @@ -4944,7 +5013,7 @@ dependencies = [ [[package]] name = "solana-install" -version = "1.9.0" +version = "1.9.4" dependencies = [ "atty", "bincode", @@ -4964,7 +5033,7 @@ dependencies = [ "solana-clap-utils", "solana-client", "solana-config-program", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-sdk", "solana-version", "tar", @@ -4976,7 +5045,7 @@ dependencies = [ [[package]] name = "solana-keygen" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "clap 2.33.3", @@ -4992,7 +5061,7 @@ dependencies = [ [[package]] name = "solana-ledger" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_matches", "bincode", @@ -5023,9 +5092,9 @@ dependencies = [ "solana-account-decoder", "solana-bpf-loader-program", "solana-entry", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-perf", @@ -5045,7 +5114,7 @@ dependencies = [ [[package]] name = "solana-ledger-tool" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_cmd", "bs58 0.4.0", @@ -5066,7 +5135,7 @@ dependencies = [ "solana-core", "solana-entry", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-runtime", "solana-sdk", @@ -5081,7 +5150,7 @@ dependencies = [ [[package]] name = "solana-local-cluster" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_matches", "crossbeam-channel", @@ -5099,7 +5168,7 @@ dependencies = [ "solana-entry", "solana-gossip", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-runtime", "solana-sdk", "solana-stake-program", @@ -5110,13 +5179,13 @@ dependencies = [ [[package]] name = "solana-log-analyzer" -version = "1.9.0" +version = "1.9.4" dependencies = [ "byte-unit", "clap 2.33.3", "serde", "serde_json", - "solana-logger 1.9.0", + "solana-logger 1.9.4", 
"solana-version", ] @@ -5133,7 +5202,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.9.0" +version = "1.9.4" dependencies = [ "env_logger 0.9.0", "lazy_static", @@ -5142,7 +5211,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log 0.4.14", "solana-sdk", @@ -5150,11 +5219,11 @@ dependencies = [ [[package]] name = "solana-merkle-root-bench" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "log 0.4.14", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-runtime", "solana-sdk", @@ -5163,17 +5232,17 @@ dependencies = [ [[package]] name = "solana-merkle-tree" -version = "1.9.0" +version = "1.9.4" dependencies = [ "fast-math", "hex", "matches", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-metrics" -version = "1.9.0" +version = "1.9.4" dependencies = [ "env_logger 0.9.0", "gethostname", @@ -5187,19 +5256,19 @@ dependencies = [ [[package]] name = "solana-net-shaper" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "rand 0.7.3", "serde", "serde_json", "solana-clap-utils", - "solana-logger 1.9.0", + "solana-logger 1.9.4", ] [[package]] name = "solana-net-utils" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "clap 2.33.3", @@ -5209,7 +5278,7 @@ dependencies = [ "serde", "serde_derive", "socket2", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-sdk", "solana-version", "tokio", @@ -5218,7 +5287,7 @@ dependencies = [ [[package]] name = "solana-notifier" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log 0.4.14", "reqwest", @@ -5227,7 +5296,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "caps", @@ -5242,7 +5311,7 @@ dependencies = [ "rand 0.7.3", "rayon", "serde", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-rayon-threadlimit", "solana-sdk", @@ -5251,7 
+5320,7 @@ dependencies = [ [[package]] name = "solana-poh" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "core_affinity", @@ -5261,7 +5330,7 @@ dependencies = [ "rand 0.7.3", "solana-entry", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-perf", @@ -5273,14 +5342,14 @@ dependencies = [ [[package]] name = "solana-poh-bench" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "log 0.4.14", "rand 0.7.3", "rayon", "solana-entry", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-perf", "solana-sdk", @@ -5326,8 +5395,9 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ + "anyhow", "assert_matches", "base64 0.13.0", "bincode", @@ -5338,8 +5408,12 @@ dependencies = [ "bs58 0.4.0", "bv", "bytemuck", + "console_error_panic_hook", + "console_log", "curve25519-dalek 3.2.0", + "getrandom 0.1.16", "itertools 0.10.1", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log 0.4.14", @@ -5355,17 +5429,18 @@ dependencies = [ "serde_json", "sha2", "sha3", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", - "solana-sdk-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", + "solana-sdk-macro 1.9.4", "static_assertions", "thiserror", + "wasm-bindgen", ] [[package]] name = "solana-program-runtime" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.13.0", "bincode", @@ -5377,14 +5452,14 @@ dependencies = [ "num-traits", "rustc_version 0.4.0", "serde", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-sdk", "thiserror", ] [[package]] name = "solana-program-test" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_matches", "async-trait", @@ -5396,7 +5471,7 @@ dependencies = [ "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.9.0", + 
"solana-logger 1.9.4", "solana-program-runtime", "solana-runtime", "solana-sdk", @@ -5407,7 +5482,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.9.0" +version = "1.9.4" dependencies = [ "lazy_static", "num_cpus", @@ -5415,7 +5490,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base32", "console", @@ -5434,7 +5509,7 @@ dependencies = [ [[package]] name = "solana-replica-lib" -version = "1.9.0" +version = "1.9.4" dependencies = [ "crossbeam-channel", "futures-util", @@ -5450,7 +5525,7 @@ dependencies = [ [[package]] name = "solana-replica-node" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "crossbeam-channel", @@ -5464,7 +5539,7 @@ dependencies = [ "solana-gossip", "solana-ledger", "solana-local-cluster", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-net-utils", "solana-replica-lib", "solana-rpc", @@ -5480,7 +5555,7 @@ dependencies = [ [[package]] name = "solana-rpc" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.12.3", "bincode", @@ -5534,7 +5609,7 @@ dependencies = [ [[package]] name = "solana-rpc-test" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "bs58 0.4.0", @@ -5546,7 +5621,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-client", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-rpc", "solana-sdk", "solana-streamer", @@ -5557,7 +5632,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.9.0" +version = "1.9.4" dependencies = [ "arrayref", "assert_matches", @@ -5577,6 +5652,8 @@ dependencies = [ "libsecp256k1 0.6.0", "log 0.4.14", "memmap2 0.5.0", + "num-derive", + "num-traits", "num_cpus", "ouroboros", "rand 0.7.3", @@ -5585,12 +5662,13 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", + "solana-address-lookup-table-program", "solana-bucket-map", "solana-compute-budget-program", "solana-config-program", - 
"solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-program-runtime", @@ -5607,11 +5685,12 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_matches", "base64 0.13.0", "bincode", + "bitflags", "borsh", "bs58 0.4.0", "bytemuck", @@ -5625,6 +5704,7 @@ dependencies = [ "generic-array 0.14.4", "hmac 0.11.0", "itertools 0.10.1", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log 0.4.14", @@ -5643,14 +5723,15 @@ dependencies = [ "serde_json", "sha2", "sha3", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", - "solana-program 1.9.0", - "solana-sdk-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", + "solana-program 1.9.4", + "solana-sdk-macro 1.9.4", "thiserror", "tiny-bip39", "uriparse", + "wasm-bindgen", ] [[package]] @@ -5668,7 +5749,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "proc-macro2 1.0.32", @@ -5679,10 +5760,10 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log 0.4.14", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-runtime", "solana-sdk", @@ -5690,7 +5771,7 @@ dependencies = [ [[package]] name = "solana-stake-accounts" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "solana-clap-utils", @@ -5704,7 +5785,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "log 0.4.14", @@ -5715,9 +5796,9 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + 
"solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "solana-metrics", "solana-program-runtime", "solana-sdk", @@ -5727,7 +5808,7 @@ dependencies = [ [[package]] name = "solana-storage-bigtable" -version = "1.9.0" +version = "1.9.4" dependencies = [ "backoff", "bincode", @@ -5753,7 +5834,7 @@ dependencies = [ [[package]] name = "solana-storage-proto" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "bs58 0.4.0", @@ -5768,24 +5849,24 @@ dependencies = [ [[package]] name = "solana-store-tool" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "log 0.4.14", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-runtime", "solana-version", ] [[package]] name = "solana-streamer" -version = "1.9.0" +version = "1.9.4" dependencies = [ "itertools 0.10.1", "libc", "log 0.4.14", "nix", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-perf", "solana-sdk", @@ -5794,13 +5875,13 @@ dependencies = [ [[package]] name = "solana-sys-tuner" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "libc", "log 0.4.14", "nix", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-version", "sysctl", "unix_socket2", @@ -5809,15 +5890,18 @@ dependencies = [ [[package]] name = "solana-test-validator" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.12.3", "log 0.4.14", + "serde_derive", + "serde_json", + "solana-cli-output", "solana-client", "solana-core", "solana-gossip", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-net-utils", "solana-program-test", "solana-rpc", @@ -5828,7 +5912,7 @@ dependencies = [ [[package]] name = "solana-tokens" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "chrono", @@ -5844,7 +5928,7 @@ dependencies = [ "solana-clap-utils", "solana-cli-config", "solana-client", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-remote-wallet", "solana-sdk", "solana-streamer", @@ -5859,7 
+5943,7 @@ dependencies = [ [[package]] name = "solana-transaction-dos" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "clap 2.33.3", @@ -5873,7 +5957,7 @@ dependencies = [ "solana-faucet", "solana-gossip", "solana-local-cluster", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-net-utils", "solana-runtime", @@ -5885,7 +5969,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" -version = "1.9.0" +version = "1.9.4" dependencies = [ "Inflector", "base64 0.12.3", @@ -5910,7 +5994,7 @@ dependencies = [ [[package]] name = "solana-upload-perf" -version = "1.9.0" +version = "1.9.4" dependencies = [ "serde_json", "solana-metrics", @@ -5918,7 +6002,7 @@ dependencies = [ [[package]] name = "solana-validator" -version = "1.9.0" +version = "1.9.4" dependencies = [ "chrono", "clap 2.33.3", @@ -5946,7 +6030,7 @@ dependencies = [ "solana-genesis-utils", "solana-gossip", "solana-ledger", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-net-utils", "solana-perf", @@ -5967,20 +6051,20 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log 0.4.14", "rustc_version 0.4.0", "serde", "serde_derive", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", "solana-sdk", ] [[package]] name = "solana-vote-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "log 0.4.14", @@ -5989,9 +6073,9 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "solana-metrics", "solana-program-runtime", "solana-sdk", @@ -6000,7 +6084,7 @@ dependencies = [ [[package]] name = "solana-watchtower" -version = "1.9.0" +version = "1.9.4" dependencies = [ "clap 2.33.3", "humantime", @@ 
-6009,7 +6093,7 @@ dependencies = [ "solana-cli-config", "solana-cli-output", "solana-client", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-notifier", "solana-sdk", @@ -6018,9 +6102,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.16" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3af7860a2bf51e63a07c4098966b1c80e8cbfdab3cf4ac36aac7fdd80ea1094c" +checksum = "8fb565d026461ba89d1d92cc36cf0882fba44076559c3bbed1e8a9888112b3d7" dependencies = [ "byteorder", "combine", @@ -6041,6 +6125,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "spin" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" + [[package]] name = "spl-associated-token-account" version = "1.0.3" @@ -6279,9 +6369,9 @@ dependencies = [ [[package]] name = "tarpc" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cb992a07637db1bcc0e4511d0c58c3f3a03f509d7c6cc2826f7646deac2032" +checksum = "b85d0a9369a919ba0db919b142a2b704cd207dfc676f7a43c2d105d0bc225487" dependencies = [ "anyhow", "fnv", @@ -6303,9 +6393,9 @@ dependencies = [ [[package]] name = "tarpc-plugins" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea80818e6c75f81d961d7426c1b938cbea6b3a51533b5ee71b61f82166b7ef3d" +checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ "proc-macro2 1.0.32", "quote 1.0.10", @@ -6665,6 +6755,7 @@ dependencies = [ "futures-sink", "pin-project", "serde", + "serde_json", ] [[package]] @@ -6860,9 +6951,9 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.12.0" +version = 
"0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99003208b647dae59dcefc49c98aecaa3512fbc29351685d4b9ef23a9218458e" +checksum = "599f388ecb26b28d9c1b2e4437ae019a7b336018b45ed911458cd9ebf91129f6" dependencies = [ "opentelemetry", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 124e11a576f921..cafa5685d30a42 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,7 +46,10 @@ members = [ "poh", "poh-bench", "program-test", + "programs/address-lookup-table", + "programs/address-lookup-table-tests", "programs/bpf_loader", + "programs/bpf_loader/gen-syscall-list", "programs/compute-budget", "programs/config", "programs/stake", diff --git a/account-decoder/Cargo.toml b/account-decoder/Cargo.toml index fbd406ee07eae9..425c2fe2f876f8 100644 --- a/account-decoder/Cargo.toml +++ b/account-decoder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-account-decoder" -version = "1.9.0" +version = "1.9.4" description = "Solana account decoder" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,9 +19,9 @@ lazy_static = "1.4.0" serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" -solana-config-program = { path = "../programs/config", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-config-program = { path = "../programs/config", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } spl-token = { version = "=3.2.0", features = ["no-entrypoint"] } thiserror = "1.0" zstd = "0.9.0" diff --git a/accounts-bench/Cargo.toml b/accounts-bench/Cargo.toml index 278fff8139da90..db2ccfe72786c2 100644 --- a/accounts-bench/Cargo.toml +++ b/accounts-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-accounts-bench" -version = "1.9.0" +version = "1.9.4" repository = 
"https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,11 +11,11 @@ publish = false [dependencies] log = "0.4.14" rayon = "1.5.1" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } clap = "2.33.1" [package.metadata.docs.rs] diff --git a/accounts-cluster-bench/Cargo.toml b/accounts-cluster-bench/Cargo.toml index fb947c89d8c365..6c8cddc0435414 100644 --- a/accounts-cluster-bench/Cargo.toml +++ b/accounts-cluster-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-accounts-cluster-bench" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,24 +13,25 @@ clap = "2.33.1" log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-core = { path = "../core", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } 
-solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-test-validator = { path = "../test-validator", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } spl-token = { version = "=3.2.0", features = ["no-entrypoint"] } [dev-dependencies] -solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" } +solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/accounts-cluster-bench/src/main.rs b/accounts-cluster-bench/src/main.rs index 4d6d029f64dda9..31b43add99cb88 100644 --- a/accounts-cluster-bench/src/main.rs +++ b/accounts-cluster-bench/src/main.rs @@ -23,6 +23,7 @@ use { solana_streamer::socket::SocketAddrSpace, solana_transaction_status::parse_token::spl_token_instruction, std::{ + cmp::min, net::SocketAddr, process::exit, sync::{ @@ -156,24 +157,30 @@ fn make_create_message( fn 
make_close_message( keypair: &Keypair, base_keypair: &Keypair, - max_closed_seed: Arc, + max_created: Arc, + max_closed: Arc, num_instructions: usize, balance: u64, spl_token: bool, ) -> Message { let instructions: Vec<_> = (0..num_instructions) .into_iter() - .map(|_| { + .filter_map(|_| { let program_id = if spl_token { inline_spl_token::id() } else { system_program::id() }; - let seed = max_closed_seed.fetch_add(1, Ordering::Relaxed).to_string(); + let max_created_seed = max_created.load(Ordering::Relaxed); + let max_closed_seed = max_closed.load(Ordering::Relaxed); + if max_closed_seed >= max_created_seed { + return None; + } + let seed = max_closed.fetch_add(1, Ordering::Relaxed).to_string(); let address = Pubkey::create_with_seed(&base_keypair.pubkey(), &seed, &program_id).unwrap(); if spl_token { - spl_token_instruction( + Some(spl_token_instruction( spl_token::instruction::close_account( &spl_token::id(), &spl_token_pubkey(&address), @@ -182,16 +189,16 @@ fn make_close_message( &[], ) .unwrap(), - ) + )) } else { - system_instruction::transfer_with_seed( + Some(system_instruction::transfer_with_seed( &address, &base_keypair.pubkey(), seed, &program_id, &keypair.pubkey(), balance, - ) + )) } }) .collect(); @@ -211,6 +218,7 @@ fn run_accounts_bench( maybe_lamports: Option, num_instructions: usize, mint: Option, + reclaim_accounts: bool, ) { assert!(num_instructions > 0); let client = @@ -350,6 +358,7 @@ fn run_accounts_bench( let message = make_close_message( payer_keypairs[0], &base_keypair, + seed_tracker.max_created.clone(), seed_tracker.max_closed.clone(), 1, min_balance, @@ -372,7 +381,7 @@ fn run_accounts_bench( } count += 1; - if last_log.elapsed().as_millis() > 3000 { + if last_log.elapsed().as_millis() > 3000 || count >= iterations { info!( "total_accounts_created: {} total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}", total_accounts_created, total_accounts_closed, tx_sent_count, count, balances @@ -387,6 +396,83 @@ fn 
run_accounts_bench( } } executor.close(); + + if reclaim_accounts { + let executor = TransactionExecutor::new(entrypoint_addr); + loop { + let max_closed_seed = seed_tracker.max_closed.load(Ordering::Relaxed); + let max_created_seed = seed_tracker.max_created.load(Ordering::Relaxed); + + if latest_blockhash.elapsed().as_millis() > 10_000 { + blockhash = client.get_latest_blockhash().expect("blockhash"); + latest_blockhash = Instant::now(); + } + message.recent_blockhash = blockhash; + let fee = client + .get_fee_for_message(&message) + .expect("get_fee_for_message"); + + let sigs_len = executor.num_outstanding(); + if sigs_len < batch_size && max_closed_seed < max_created_seed { + let num_to_close = min( + batch_size - sigs_len, + (max_created_seed - max_closed_seed) as usize, + ); + if num_to_close >= payer_keypairs.len() { + info!("closing {} accounts", num_to_close); + let chunk_size = num_to_close / payer_keypairs.len(); + info!("{:?} chunk_size", chunk_size); + if chunk_size > 0 { + for (i, keypair) in payer_keypairs.iter().enumerate() { + let txs: Vec<_> = (0..chunk_size) + .into_par_iter() + .filter_map(|_| { + let message = make_close_message( + keypair, + &base_keypair, + seed_tracker.max_created.clone(), + seed_tracker.max_closed.clone(), + num_instructions, + min_balance, + mint.is_some(), + ); + if message.instructions.is_empty() { + return None; + } + let signers: Vec<&Keypair> = vec![keypair, &base_keypair]; + Some(Transaction::new(&signers, message, blockhash)) + }) + .collect(); + balances[i] = balances[i].saturating_sub(fee * txs.len() as u64); + info!("close txs: {}", txs.len()); + let new_ids = executor.push_transactions(txs); + info!("close ids: {}", new_ids.len()); + tx_sent_count += new_ids.len(); + total_accounts_closed += (num_instructions * new_ids.len()) as u64; + } + } + } + } else { + let _ = executor.drain_cleared(); + } + count += 1; + if last_log.elapsed().as_millis() > 3000 || max_closed_seed >= max_created_seed { + info!( + 
"total_accounts_closed: {} tx_sent_count: {} loop_count: {} balance(s): {:?}", + total_accounts_closed, tx_sent_count, count, balances + ); + last_log = Instant::now(); + } + + if max_closed_seed >= max_created_seed { + break; + } + if executor.num_outstanding() >= batch_size { + sleep(Duration::from_millis(500)); + } + } + executor.close(); + } } fn main() { @@ -462,7 +548,7 @@ fn main() { .long("iterations") .takes_value(true) .value_name("NUM") - .help("Number of iterations to make"), + .help("Number of iterations to make. 0 = unlimited iterations."), ) .arg( Arg::with_name("check_gossip") @@ -475,6 +561,12 @@ fn main() { .takes_value(true) .help("Mint address to initialize account"), ) + .arg( + Arg::with_name("reclaim_accounts") + .long("reclaim-accounts") + .takes_value(false) + .help("Reclaim accounts after session ends; incompatible with --iterations 0"), + ) .get_matches(); let skip_gossip = !matches.is_present("check_gossip"); @@ -556,6 +648,7 @@ fn main() { lamports, num_instructions, mint, + matches.is_present("reclaim_accounts"), ); } @@ -564,12 +657,18 @@ pub mod test { use { super::*, solana_core::validator::ValidatorConfig, + solana_faucet::faucet::run_local_faucet, solana_local_cluster::{ local_cluster::{ClusterConfig, LocalCluster}, validator_configs::make_identical_validator_configs, }, solana_measure::measure::Measure, - solana_sdk::poh_config::PohConfig, + solana_sdk::{native_token::sol_to_lamports, poh_config::PohConfig}, + solana_test_validator::TestValidator, + spl_token::{ + solana_program::program_pack::Pack, + state::{Account, Mint}, + }, }; #[test] @@ -605,6 +704,108 @@ pub mod test { maybe_lamports, num_instructions, None, + false, + ); + start.stop(); + info!("{}", start); + } + + #[test] + fn test_create_then_reclaim_spl_token_accounts() { + solana_logger::setup(); + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = 
TestValidator::with_custom_fees( + mint_pubkey, + 1, + Some(faucet_addr), + SocketAddrSpace::Unspecified, + ); + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + + // Created funder + let funder = Keypair::new(); + let latest_blockhash = rpc_client.get_latest_blockhash().unwrap(); + let signature = rpc_client + .request_airdrop_with_blockhash( + &funder.pubkey(), + sol_to_lamports(1.0), + &latest_blockhash, + ) + .unwrap(); + rpc_client + .confirm_transaction_with_spinner( + &signature, + &latest_blockhash, + CommitmentConfig::confirmed(), + ) + .unwrap(); + + // Create Mint + let spl_mint_keypair = Keypair::new(); + let spl_mint_len = Mint::get_packed_len(); + let spl_mint_rent = rpc_client + .get_minimum_balance_for_rent_exemption(spl_mint_len) + .unwrap(); + let transaction = Transaction::new_signed_with_payer( + &[ + system_instruction::create_account( + &funder.pubkey(), + &spl_mint_keypair.pubkey(), + spl_mint_rent, + spl_mint_len as u64, + &inline_spl_token::id(), + ), + spl_token_instruction( + spl_token::instruction::initialize_mint( + &spl_token::id(), + &spl_token_pubkey(&spl_mint_keypair.pubkey()), + &spl_token_pubkey(&spl_mint_keypair.pubkey()), + None, + 2, + ) + .unwrap(), + ), + ], + Some(&funder.pubkey()), + &[&funder, &spl_mint_keypair], + latest_blockhash, + ); + let _sig = rpc_client + .send_and_confirm_transaction(&transaction) + .unwrap(); + + let account_len = Account::get_packed_len(); + let minimum_balance = rpc_client + .get_minimum_balance_for_rent_exemption(account_len) + .unwrap(); + + let iterations = 5; + let batch_size = 100; + let close_nth_batch = 0; + let num_instructions = 4; + let mut start = Measure::start("total accounts run"); + let keypair0 = Keypair::new(); + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + run_accounts_bench( + test_validator + .rpc_url() + .replace("http://", "") + .parse() + .unwrap(), + faucet_addr, + &[&keypair0, &keypair1, 
&keypair2], + iterations, + Some(account_len as u64), + batch_size, + close_nth_batch, + Some(minimum_balance), + num_instructions, + Some(spl_mint_keypair.pubkey()), + true, ); start.stop(); info!("{}", start); diff --git a/accountsdb-plugin-interface/Cargo.toml b/accountsdb-plugin-interface/Cargo.toml index 12d7e38b19ce37..d7ada7cc83e56d 100644 --- a/accountsdb-plugin-interface/Cargo.toml +++ b/accountsdb-plugin-interface/Cargo.toml @@ -3,17 +3,17 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-accountsdb-plugin-interface" description = "The Solana AccountsDb plugin interface." -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" -documentation = "https://docs.rs/solana-validator" +documentation = "https://docs.rs/solana-accountsdb-plugin-interface" [dependencies] log = "0.4.11" thiserror = "1.0.30" -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs b/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs index 3a6caa53a2881a..e767d864a28333 100644 --- a/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs +++ b/accountsdb-plugin-interface/src/accountsdb_plugin_interface.rs @@ -3,8 +3,8 @@ /// In addition, the dynamic library must export a "C" function _create_plugin which /// creates the implementation of the plugin. 
use { - solana_sdk::{signature::Signature, transaction::SanitizedTransaction}, - solana_transaction_status::TransactionStatusMeta, + solana_sdk::{clock::UnixTimestamp, signature::Signature, transaction::SanitizedTransaction}, + solana_transaction_status::{Reward, TransactionStatusMeta}, std::{any::Any, error, io}, thiserror::Error, }; @@ -12,54 +12,117 @@ use { impl Eq for ReplicaAccountInfo<'_> {} #[derive(Clone, PartialEq, Debug)] +/// Information about an account being updated pub struct ReplicaAccountInfo<'a> { + /// The Pubkey for the account pub pubkey: &'a [u8], + + /// The lamports for the account pub lamports: u64, + + /// The Pubkey of the owner program account pub owner: &'a [u8], + + /// This account's data contains a loaded program (and is now read-only) pub executable: bool, + + /// The epoch at which this account will next owe rent pub rent_epoch: u64, + + /// The data held in this account. pub data: &'a [u8], + + /// A global monotonically increasing atomic number, which can be used + /// to tell the order of the account update. For example, when an + /// account is updated in the same slot multiple times, the update + /// with higher write_version should supersede the one with lower + /// write_version. pub write_version: u64, } +/// A wrapper to future-proof ReplicaAccountInfo handling. +/// If there were a change to the structure of ReplicaAccountInfo, +/// there would be new enum entry for the newer version, forcing +/// plugin implementations to handle the change. pub enum ReplicaAccountInfoVersions<'a> { V0_0_1(&'a ReplicaAccountInfo<'a>), } +/// Information about a transaction #[derive(Clone, Debug)] pub struct ReplicaTransactionInfo<'a> { + /// The first signature of the transaction, used for identifying the transaction. pub signature: &'a Signature, + + /// Indicates if the transaction is a simple vote transaction. pub is_vote: bool, + + /// The sanitized transaction. 
pub transaction: &'a SanitizedTransaction, + + /// Metadata of the transaction status. pub transaction_status_meta: &'a TransactionStatusMeta, } +/// A wrapper to future-proof ReplicaTransactionInfo handling. +/// If there were a change to the structure of ReplicaTransactionInfo, +/// there would be new enum entry for the newer version, forcing +/// plugin implementations to handle the change. pub enum ReplicaTransactionInfoVersions<'a> { V0_0_1(&'a ReplicaTransactionInfo<'a>), } +#[derive(Clone, Debug)] +pub struct ReplicaBlockInfo<'a> { + pub slot: u64, + pub blockhash: &'a str, + pub rewards: &'a [Reward], + pub block_time: Option, + pub block_height: Option, +} + +pub enum ReplicaBlockInfoVersions<'a> { + V0_0_1(&'a ReplicaBlockInfo<'a>), +} + +/// Errors returned by plugin calls #[derive(Error, Debug)] pub enum AccountsDbPluginError { + /// Error opening the configuration file; for example, when the file + /// is not found or when the validator process has no permission to read it. #[error("Error opening config file. Error detail: ({0}).")] ConfigFileOpenError(#[from] io::Error), + /// Error in reading the content of the config file or the content + /// is not in the expected format. #[error("Error reading config file. Error message: ({msg})")] ConfigFileReadError { msg: String }, + /// Error when updating the account. #[error("Error updating account. Error message: ({msg})")] AccountsUpdateError { msg: String }, + /// Error when updating the slot status #[error("Error updating slot status. Error message: ({msg})")] SlotStatusUpdateError { msg: String }, + /// Any custom error defined by the plugin. #[error("Plugin-defined custom error. Error message: ({0})")] Custom(Box), } +/// The current status of a slot #[derive(Debug, Clone)] pub enum SlotStatus { + /// The highest slot of the heaviest fork processed by the node. 
Ledger state at this slot is + /// not derived from a confirmed or finalized block, but if multiple forks are present, is from + /// the fork the validator believes is most likely to finalize. Processed, + + /// The highest slot having reached max vote lockout. Rooted, + + /// The highest slot that has been voted on by supermajority of the cluster, ie. is confirmed. Confirmed, } @@ -75,6 +138,9 @@ impl SlotStatus { pub type Result = std::result::Result; +/// Defines an AccountsDb plugin, to stream data from the runtime. +/// AccountsDb plugins must describe desired behavior for load and unload, +/// as well as how they will handle streamed data. pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug { fn name(&self) -> &'static str; @@ -93,6 +159,9 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug { fn on_unload(&mut self) {} /// Called when an account is updated at a slot. + /// When `is_startup` is true, it indicates the account is loaded from + /// snapshots when the validator starts up. When `is_startup` is false, + /// the account is updated during transaction processing. #[allow(unused_variables)] fn update_account( &mut self, @@ -129,6 +198,12 @@ pub trait AccountsDbPlugin: Any + Send + Sync + std::fmt::Debug { Ok(()) } + /// Called when block's metadata is updated. + #[allow(unused_variables)] + fn notify_block_metadata(&mut self, blockinfo: ReplicaBlockInfoVersions) -> Result<()> { + Ok(()) + } + /// Check if the plugin is interested in account data /// Default is true -- if the plugin is not interested in /// account data, please return false. diff --git a/accountsdb-plugin-manager/Cargo.toml b/accountsdb-plugin-manager/Cargo.toml index d645b0dc58aae7..507f18e339df95 100644 --- a/accountsdb-plugin-manager/Cargo.toml +++ b/accountsdb-plugin-manager/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-accountsdb-plugin-manager" description = "The Solana AccountsDb plugin manager." 
-version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -17,14 +17,14 @@ log = "0.4.11" serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" -solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } +solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } thiserror = "1.0.30" [package.metadata.docs.rs] diff --git a/accountsdb-plugin-manager/src/accountsdb_plugin_service.rs b/accountsdb-plugin-manager/src/accountsdb_plugin_service.rs index fefaa8c1b8c3eb..25c74bf262e602 100644 --- a/accountsdb-plugin-manager/src/accountsdb_plugin_service.rs +++ b/accountsdb-plugin-manager/src/accountsdb_plugin_service.rs @@ -2,6 +2,8 @@ use { crate::{ accounts_update_notifier::AccountsUpdateNotifierImpl, accountsdb_plugin_manager::AccountsDbPluginManager, + block_metadata_notifier::BlockMetadataNotifierImpl, + block_metadata_notifier_interface::BlockMetadataNotifierLock, slot_status_notifier::SlotStatusNotifierImpl, 
slot_status_observer::SlotStatusObserver, transaction_notifier::TransactionNotifierImpl, }, @@ -50,6 +52,7 @@ pub struct AccountsDbPluginService { plugin_manager: Arc>, accounts_update_notifier: Option, transaction_notifier: Option, + block_metadata_notifier: Option, } impl AccountsDbPluginService { @@ -102,17 +105,24 @@ impl AccountsDbPluginService { None }; - let slot_status_observer = - if account_data_notifications_enabled || transaction_notifications_enabled { - let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone()); - let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier)); + let (slot_status_observer, block_metadata_notifier): ( + Option, + Option, + ) = if account_data_notifications_enabled || transaction_notifications_enabled { + let slot_status_notifier = SlotStatusNotifierImpl::new(plugin_manager.clone()); + let slot_status_notifier = Arc::new(RwLock::new(slot_status_notifier)); + ( Some(SlotStatusObserver::new( confirmed_bank_receiver, slot_status_notifier, - )) - } else { - None - }; + )), + Some(Arc::new(RwLock::new(BlockMetadataNotifierImpl::new( + plugin_manager.clone(), + )))), + ) + } else { + (None, None) + }; info!("Started AccountsDbPluginService"); Ok(AccountsDbPluginService { @@ -120,6 +130,7 @@ impl AccountsDbPluginService { plugin_manager, accounts_update_notifier, transaction_notifier, + block_metadata_notifier, }) } @@ -186,6 +197,10 @@ impl AccountsDbPluginService { self.transaction_notifier.clone() } + pub fn get_block_metadata_notifier(&self) -> Option { + self.block_metadata_notifier.clone() + } + pub fn join(self) -> thread::Result<()> { if let Some(mut slot_status_observer) = self.slot_status_observer { slot_status_observer.join()?; diff --git a/accountsdb-plugin-manager/src/block_metadata_notifier.rs b/accountsdb-plugin-manager/src/block_metadata_notifier.rs new file mode 100644 index 00000000000000..8291e9f038aace --- /dev/null +++ b/accountsdb-plugin-manager/src/block_metadata_notifier.rs @@ 
-0,0 +1,105 @@ +use { + crate::{ + accountsdb_plugin_manager::AccountsDbPluginManager, + block_metadata_notifier_interface::BlockMetadataNotifier, + }, + log::*, + solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{ + ReplicaBlockInfo, ReplicaBlockInfoVersions, + }, + solana_measure::measure::Measure, + solana_metrics::*, + solana_runtime::bank::RewardInfo, + solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey}, + solana_transaction_status::{Reward, Rewards}, + std::sync::{Arc, RwLock}, +}; + +pub(crate) struct BlockMetadataNotifierImpl { + plugin_manager: Arc>, +} + +impl BlockMetadataNotifier for BlockMetadataNotifierImpl { + /// Notify the block metadata + fn notify_block_metadata( + &self, + slot: u64, + blockhash: &str, + rewards: &RwLock>, + block_time: Option, + block_height: Option, + ) { + let mut plugin_manager = self.plugin_manager.write().unwrap(); + if plugin_manager.plugins.is_empty() { + return; + } + let rewards = Self::build_rewards(rewards); + + for plugin in plugin_manager.plugins.iter_mut() { + let mut measure = Measure::start("accountsdb-plugin-update-slot"); + let block_info = + Self::build_replica_block_info(slot, blockhash, &rewards, block_time, block_height); + let block_info = ReplicaBlockInfoVersions::V0_0_1(&block_info); + match plugin.notify_block_metadata(block_info) { + Err(err) => { + error!( + "Failed to update block metadata at slot {}, error: {} to plugin {}", + slot, + err, + plugin.name() + ) + } + Ok(_) => { + trace!( + "Successfully updated block metadata at slot {} to plugin {}", + slot, + plugin.name() + ); + } + } + measure.stop(); + inc_new_counter_debug!( + "accountsdb-plugin-update-block-metadata-us", + measure.as_us() as usize, + 1000, + 1000 + ); + } + } +} + +impl BlockMetadataNotifierImpl { + fn build_rewards(rewards: &RwLock>) -> Rewards { + let rewards = rewards.read().unwrap(); + rewards + .iter() + .map(|(pubkey, reward)| Reward { + pubkey: pubkey.to_string(), + lamports: reward.lamports, + 
post_balance: reward.post_balance, + reward_type: Some(reward.reward_type), + commission: reward.commission, + }) + .collect() + } + + fn build_replica_block_info<'a>( + slot: u64, + blockhash: &'a str, + rewards: &'a [Reward], + block_time: Option, + block_height: Option, + ) -> ReplicaBlockInfo<'a> { + ReplicaBlockInfo { + slot, + blockhash, + rewards, + block_time, + block_height, + } + } + + pub fn new(plugin_manager: Arc>) -> Self { + Self { plugin_manager } + } +} diff --git a/accountsdb-plugin-manager/src/block_metadata_notifier_interface.rs b/accountsdb-plugin-manager/src/block_metadata_notifier_interface.rs new file mode 100644 index 00000000000000..6d4b9f6ad2f569 --- /dev/null +++ b/accountsdb-plugin-manager/src/block_metadata_notifier_interface.rs @@ -0,0 +1,20 @@ +use { + solana_runtime::bank::RewardInfo, + solana_sdk::{clock::UnixTimestamp, pubkey::Pubkey}, + std::sync::{Arc, RwLock}, +}; + +/// Interface for notifying block metadata changes +pub trait BlockMetadataNotifier { + /// Notify the block metadata + fn notify_block_metadata( + &self, + slot: u64, + blockhash: &str, + rewards: &RwLock>, + block_time: Option, + block_height: Option, + ); +} + +pub type BlockMetadataNotifierLock = Arc>; diff --git a/accountsdb-plugin-manager/src/lib.rs b/accountsdb-plugin-manager/src/lib.rs index e12484eb05c96e..5af39e804e4754 100644 --- a/accountsdb-plugin-manager/src/lib.rs +++ b/accountsdb-plugin-manager/src/lib.rs @@ -1,6 +1,8 @@ pub mod accounts_update_notifier; pub mod accountsdb_plugin_manager; pub mod accountsdb_plugin_service; +pub mod block_metadata_notifier; +pub mod block_metadata_notifier_interface; pub mod slot_status_notifier; pub mod slot_status_observer; pub mod transaction_notifier; diff --git a/accountsdb-plugin-postgres/Cargo.toml b/accountsdb-plugin-postgres/Cargo.toml index d094d863379f53..16aab5b28055f6 100644 --- a/accountsdb-plugin-postgres/Cargo.toml +++ b/accountsdb-plugin-postgres/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana 
Maintainers "] edition = "2021" name = "solana-accountsdb-plugin-postgres" description = "The Solana AccountsDb plugin for PostgreSQL database." -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -22,18 +22,18 @@ postgres-types = { version = "0.2.2", features = ["derive"] } serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" -solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } +solana-accountsdb-plugin-interface = { path = "../accountsdb-plugin-interface", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } thiserror = "1.0.30" tokio-postgres = "0.7.4" [dev-dependencies] -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/accountsdb-plugin-postgres/scripts/create_schema.sql b/accountsdb-plugin-postgres/scripts/create_schema.sql index ec0de205dee6f7..994a2176cfb178 100644 --- a/accountsdb-plugin-postgres/scripts/create_schema.sql +++ b/accountsdb-plugin-postgres/scripts/create_schema.sql @@ -47,7 +47,8 @@ 
Create TYPE "TransactionErrorCode" AS ENUM ( 'WouldExceedMaxAccountCostLimit', 'WouldExceedMaxBlockCostLimit', 'UnsupportedVersion', - 'InvalidWritableAccount' + 'InvalidWritableAccount', + 'WouldExceedMaxAccountDataCostLimit' ); CREATE TYPE "TransactionError" AS ( @@ -113,9 +114,10 @@ CREATE TYPE "TransactionMessage" AS ( instructions "CompiledInstruction"[] ); -CREATE TYPE "AddressMapIndexes" AS ( - writable SMALLINT[], - readonly SMALLINT[] +CREATE TYPE "TransactionMessageAddressTableLookup" AS ( + account_key BYTEA, + writable_indexes SMALLINT[], + readonly_indexes SMALLINT[] ); CREATE TYPE "TransactionMessageV0" AS ( @@ -123,17 +125,17 @@ CREATE TYPE "TransactionMessageV0" AS ( account_keys BYTEA[], recent_blockhash BYTEA, instructions "CompiledInstruction"[], - address_map_indexes "AddressMapIndexes"[] + address_table_lookups "TransactionMessageAddressTableLookup"[] ); -CREATE TYPE "MappedAddresses" AS ( +CREATE TYPE "LoadedAddresses" AS ( writable BYTEA[], readonly BYTEA[] ); -CREATE TYPE "MappedMessage" AS ( +CREATE TYPE "LoadedMessageV0" AS ( message "TransactionMessageV0", - mapped_addresses "MappedAddresses" + loaded_addresses "LoadedAddresses" ); -- The table storing transactions @@ -143,7 +145,7 @@ CREATE TABLE transaction ( is_vote BOOL NOT NULL, message_type SMALLINT, -- 0: legacy, 1: v0 message legacy_message "TransactionMessage", - v0_mapped_message "MappedMessage", + v0_loaded_message "LoadedMessageV0", signatures BYTEA[], message_hash BYTEA, meta "TransactionStatusMeta", @@ -151,6 +153,16 @@ CREATE TABLE transaction ( CONSTRAINT transaction_pk PRIMARY KEY (slot, signature) ); +-- The table storing block metadata +CREATE TABLE block ( + slot BIGINT PRIMARY KEY, + blockhash VARCHAR(44), + rewards "Reward"[], + block_time BIGINT, + block_height BIGINT, + updated_on TIMESTAMP NOT NULL +); + /** * The following is for keeping historical data for accounts and is not required for plugin to work. 
*/ diff --git a/accountsdb-plugin-postgres/scripts/drop_schema.sql b/accountsdb-plugin-postgres/scripts/drop_schema.sql index 419ab44169cb95..448564f93399cf 100644 --- a/accountsdb-plugin-postgres/scripts/drop_schema.sql +++ b/accountsdb-plugin-postgres/scripts/drop_schema.sql @@ -8,15 +8,16 @@ DROP TABLE account_audit; DROP TABLE account; DROP TABLE slot; DROP TABLE transaction; +DROP TABLE block; DROP TYPE "TransactionError" CASCADE; DROP TYPE "TransactionErrorCode" CASCADE; -DROP TYPE "MappedMessage" CASCADE; -DROP TYPE "MappedAddresses" CASCADE; +DROP TYPE "LoadedMessageV0" CASCADE; +DROP TYPE "LoadedAddresses" CASCADE; DROP TYPE "TransactionMessageV0" CASCADE; -DROP TYPE "AddressMapIndexes" CASCADE; DROP TYPE "TransactionMessage" CASCADE; DROP TYPE "TransactionMessageHeader" CASCADE; +DROP TYPE "TransactionMessageAddressTableLookup" CASCADE; DROP TYPE "TransactionStatusMeta" CASCADE; DROP TYPE "RewardType" CASCADE; DROP TYPE "Reward" CASCADE; diff --git a/accountsdb-plugin-postgres/src/accountsdb_plugin_postgres.rs b/accountsdb-plugin-postgres/src/accountsdb_plugin_postgres.rs index cecdbb6fa51829..45bb441bd9100d 100644 --- a/accountsdb-plugin-postgres/src/accountsdb_plugin_postgres.rs +++ b/accountsdb-plugin-postgres/src/accountsdb_plugin_postgres.rs @@ -12,7 +12,7 @@ use { serde_json, solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{ AccountsDbPlugin, AccountsDbPluginError, ReplicaAccountInfoVersions, - ReplicaTransactionInfoVersions, Result, SlotStatus, + ReplicaBlockInfoVersions, ReplicaTransactionInfoVersions, Result, SlotStatus, }, solana_metrics::*, std::{fs::File, io::Read}, @@ -41,6 +41,8 @@ pub struct AccountsDbPluginPostgresConfig { pub threads: Option, pub batch_size: Option, pub panic_on_db_errors: Option, + /// Indicates if to store historical data for accounts + pub store_account_historical_data: Option, } #[derive(Error, Debug)] @@ -74,7 +76,7 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres { /// Accounts either 
satisyfing the accounts condition or owners condition will be selected. /// When only owners is specified, /// all accounts belonging to the owners will be streamed. - /// The accounts field support wildcard to select all accounts: + /// The accounts field supports wildcard to select all accounts: /// "accounts_selector" : { /// "accounts" : \["*"\], /// } @@ -85,6 +87,8 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres { /// Please refer to https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html for the connection configuration. /// When `connection_str` is set, the values in "host", "user" and "port" are ignored. If `connection_str` is not given, /// `host` and `user` must be given. + /// "store_account_historical_data", optional, set it to 'true', to store historical account data to account_audit + /// table. /// * "threads" optional, specifies the number of worker threads for the plugin. A thread /// maintains a PostgreSQL connection to the server. The default is '10'. /// * "batch_size" optional, specifies the batch size of bulk insert when the AccountsDb is created @@ -334,6 +338,31 @@ impl AccountsDbPlugin for AccountsDbPluginPostgres { Ok(()) } + fn notify_block_metadata(&mut self, block_info: ReplicaBlockInfoVersions) -> Result<()> { + match &mut self.client { + None => { + return Err(AccountsDbPluginError::Custom(Box::new( + AccountsDbPluginPostgresError::DataStoreConnectionError { + msg: "There is no connection to the PostgreSQL database.".to_string(), + }, + ))); + } + Some(client) => match block_info { + ReplicaBlockInfoVersions::V0_0_1(block_info) => { + let result = client.update_block_metadata(block_info); + + if let Err(err) = result { + return Err(AccountsDbPluginError::SlotStatusUpdateError{ + msg: format!("Failed to persist the update of block metadata to the PostgreSQL database. 
Error: {:?}", err) + }); + } + } + }, + } + + Ok(()) + } + /// Check if the plugin is interested in account data /// Default is true -- if the plugin is not interested in /// account data, please return false. diff --git a/accountsdb-plugin-postgres/src/postgres_client.rs b/accountsdb-plugin-postgres/src/postgres_client.rs index 1ae7c7ad7c573c..bd6ae5098f0e17 100644 --- a/accountsdb-plugin-postgres/src/postgres_client.rs +++ b/accountsdb-plugin-postgres/src/postgres_client.rs @@ -1,4 +1,6 @@ #![allow(clippy::integer_arithmetic)] + +mod postgres_client_block_metadata; mod postgres_client_transaction; /// A concurrent implementation for writing accounts into the PostgreSQL in parallel. @@ -10,9 +12,10 @@ use { crossbeam_channel::{bounded, Receiver, RecvTimeoutError, Sender}, log::*, postgres::{Client, NoTls, Statement}, + postgres_client_block_metadata::DbBlockInfo, postgres_client_transaction::LogTransactionRequest, solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{ - AccountsDbPluginError, ReplicaAccountInfo, SlotStatus, + AccountsDbPluginError, ReplicaAccountInfo, ReplicaBlockInfo, SlotStatus, }, solana_measure::measure::Measure, solana_metrics::*, @@ -36,6 +39,7 @@ const DEFAULT_THREADS_COUNT: usize = 100; const DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE: usize = 10; const ACCOUNT_COLUMN_COUNT: usize = 9; const DEFAULT_PANIC_ON_DB_ERROR: bool = false; +const DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA: bool = false; struct PostgresSqlClientWrapper { client: Client, @@ -44,6 +48,8 @@ struct PostgresSqlClientWrapper { update_slot_with_parent_stmt: Statement, update_slot_without_parent_stmt: Statement, update_transaction_log_stmt: Statement, + update_block_metadata_stmt: Statement, + insert_account_audit_stmt: Option, } pub struct SimplePostgresClient { @@ -195,6 +201,11 @@ pub trait PostgresClient { &mut self, transaction_log_info: LogTransactionRequest, ) -> Result<(), AccountsDbPluginError>; + + fn update_block_metadata( + &mut self, + block_info: 
UpdateBlockMetadataRequest, + ) -> Result<(), AccountsDbPluginError>; } impl SimplePostgresClient { @@ -315,6 +326,28 @@ impl SimplePostgresClient { } } + fn build_account_audit_insert_statement( + client: &mut Client, + config: &AccountsDbPluginPostgresConfig, + ) -> Result { + let stmt = "INSERT INTO account_audit (pubkey, slot, owner, lamports, executable, rent_epoch, data, write_version, updated_on) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)"; + + let stmt = client.prepare(stmt); + + match stmt { + Err(err) => { + return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError { + msg: format!( + "Error in preparing for the account_audit update PostgreSQL database: {} host: {:?} user: {:?} config: {:?}", + err, config.host, config.user, config + ), + }))); + } + Ok(stmt) => Ok(stmt), + } + } + fn build_slot_upsert_statement_with_parent( client: &mut Client, config: &AccountsDbPluginPostgresConfig, @@ -361,16 +394,52 @@ impl SimplePostgresClient { } } + /// Internal function for inserting an account into account_audit table. + fn insert_account_audit( + account: &DbAccountInfo, + statement: &Statement, + client: &mut Client, + ) -> Result<(), AccountsDbPluginError> { + let lamports = account.lamports() as i64; + let rent_epoch = account.rent_epoch() as i64; + let updated_on = Utc::now().naive_utc(); + let result = client.execute( + statement, + &[ + &account.pubkey(), + &account.slot, + &account.owner(), + &lamports, + &account.executable(), + &rent_epoch, + &account.data(), + &account.write_version(), + &updated_on, + ], + ); + + if let Err(err) = result { + let msg = format!( + "Failed to persist the insert of account_audit to the PostgreSQL database. 
Error: {:?}", + err + ); + error!("{}", msg); + return Err(AccountsDbPluginError::AccountsUpdateError { msg }); + } + Ok(()) + } + /// Internal function for updating or inserting a single account fn upsert_account_internal( account: &DbAccountInfo, statement: &Statement, client: &mut Client, + insert_account_audit_stmt: &Option, ) -> Result<(), AccountsDbPluginError> { let lamports = account.lamports() as i64; let rent_epoch = account.rent_epoch() as i64; let updated_on = Utc::now().naive_utc(); - let result = client.query( + let result = client.execute( statement, &[ &account.pubkey(), @@ -392,6 +461,11 @@ impl SimplePostgresClient { ); error!("{}", msg); return Err(AccountsDbPluginError::AccountsUpdateError { msg }); + } else if result.unwrap() == 0 && insert_account_audit_stmt.is_some() { + // If no records modified (inserted or updated), it is because the account is updated + // at an older slot, insert the record directly into the account_audit table. + let statement = insert_account_audit_stmt.as_ref().unwrap(); + Self::insert_account_audit(account, statement, client)?; } Ok(()) @@ -400,9 +474,10 @@ impl SimplePostgresClient { /// Update or insert a single account fn upsert_account(&mut self, account: &DbAccountInfo) -> Result<(), AccountsDbPluginError> { let client = self.client.get_mut().unwrap(); + let insert_account_audit_stmt = &client.insert_account_audit_stmt; let statement = &client.update_account_stmt; let client = &mut client.client; - Self::upsert_account_internal(account, statement, client) + Self::upsert_account_internal(account, statement, client, insert_account_audit_stmt) } /// Insert accounts in batch to reduce network overhead @@ -478,11 +553,12 @@ impl SimplePostgresClient { } let client = self.client.get_mut().unwrap(); + let insert_account_audit_stmt = &client.insert_account_audit_stmt; let statement = &client.update_account_stmt; let client = &mut client.client; for account in self.pending_account_updates.drain(..) 
{ - Self::upsert_account_internal(&account, statement, client)?; + Self::upsert_account_internal(&account, statement, client, insert_account_audit_stmt)?; } Ok(()) @@ -501,10 +577,24 @@ impl SimplePostgresClient { Self::build_slot_upsert_statement_without_parent(&mut client, config)?; let update_transaction_log_stmt = Self::build_transaction_info_upsert_statement(&mut client, config)?; + let update_block_metadata_stmt = + Self::build_block_metadata_upsert_statement(&mut client, config)?; let batch_size = config .batch_size .unwrap_or(DEFAULT_ACCOUNTS_INSERT_BATCH_SIZE); + + let store_account_historical_data = config + .store_account_historical_data + .unwrap_or(DEFAULT_STORE_ACCOUNT_HISTORICAL_DATA); + + let insert_account_audit_stmt = if store_account_historical_data { + let stmt = Self::build_account_audit_insert_statement(&mut client, config)?; + Some(stmt) + } else { + None + }; + info!("Created SimplePostgresClient."); Ok(Self { batch_size, @@ -516,6 +606,8 @@ impl SimplePostgresClient { update_slot_with_parent_stmt, update_slot_without_parent_stmt, update_transaction_log_stmt, + update_block_metadata_stmt, + insert_account_audit_stmt, }), }) } @@ -591,6 +683,13 @@ impl PostgresClient for SimplePostgresClient { ) -> Result<(), AccountsDbPluginError> { self.log_transaction_impl(transaction_log_info) } + + fn update_block_metadata( + &mut self, + block_info: UpdateBlockMetadataRequest, + ) -> Result<(), AccountsDbPluginError> { + self.update_block_metadata_impl(block_info) + } } struct UpdateAccountRequest { @@ -604,11 +703,16 @@ struct UpdateSlotRequest { slot_status: SlotStatus, } +pub struct UpdateBlockMetadataRequest { + pub block_info: DbBlockInfo, +} + #[warn(clippy::large_enum_variant)] enum DbWorkItem { UpdateAccount(Box), UpdateSlot(Box), LogTransaction(Box), + UpdateBlockMetadata(Box), } impl PostgresClientWorker { @@ -672,6 +776,14 @@ impl PostgresClientWorker { DbWorkItem::LogTransaction(transaction_log_info) => { 
self.client.log_transaction(*transaction_log_info)?; } + DbWorkItem::UpdateBlockMetadata(block_info) => { + if let Err(err) = self.client.update_block_metadata(*block_info) { + error!("Failed to update block metadata: ({})", err); + if panic_on_db_errors { + abort(); + } + } + } }, Err(err) => match err { RecvTimeoutError::Timeout => { @@ -863,6 +975,25 @@ impl ParallelPostgresClient { Ok(()) } + pub fn update_block_metadata( + &mut self, + block_info: &ReplicaBlockInfo, + ) -> Result<(), AccountsDbPluginError> { + if let Err(err) = self.sender.send(DbWorkItem::UpdateBlockMetadata(Box::new( + UpdateBlockMetadataRequest { + block_info: DbBlockInfo::from(block_info), + }, + ))) { + return Err(AccountsDbPluginError::SlotStatusUpdateError { + msg: format!( + "Failed to update the block metadata at slot {:?}, error: {:?}", + block_info.slot, err + ), + }); + } + Ok(()) + } + pub fn notify_end_of_startup(&mut self) -> Result<(), AccountsDbPluginError> { info!("Notifying the end of startup"); // Ensure all items in the queue has been received by the workers diff --git a/accountsdb-plugin-postgres/src/postgres_client/postgres_client_block_metadata.rs b/accountsdb-plugin-postgres/src/postgres_client/postgres_client_block_metadata.rs new file mode 100644 index 00000000000000..a882e6767c7d42 --- /dev/null +++ b/accountsdb-plugin-postgres/src/postgres_client/postgres_client_block_metadata.rs @@ -0,0 +1,97 @@ +use { + crate::{ + accountsdb_plugin_postgres::{ + AccountsDbPluginPostgresConfig, AccountsDbPluginPostgresError, + }, + postgres_client::{ + postgres_client_transaction::DbReward, SimplePostgresClient, UpdateBlockMetadataRequest, + }, + }, + chrono::Utc, + log::*, + postgres::{Client, Statement}, + solana_accountsdb_plugin_interface::accountsdb_plugin_interface::{ + AccountsDbPluginError, ReplicaBlockInfo, + }, +}; + +#[derive(Clone, Debug)] +pub struct DbBlockInfo { + pub slot: i64, + pub blockhash: String, + pub rewards: Vec, + pub block_time: Option, + pub 
block_height: Option, +} + +impl<'a> From<&ReplicaBlockInfo<'a>> for DbBlockInfo { + fn from(block_info: &ReplicaBlockInfo) -> Self { + Self { + slot: block_info.slot as i64, + blockhash: block_info.blockhash.to_string(), + rewards: block_info.rewards.iter().map(DbReward::from).collect(), + block_time: block_info.block_time, + block_height: block_info + .block_height + .map(|block_height| block_height as i64), + } + } +} + +impl SimplePostgresClient { + pub(crate) fn build_block_metadata_upsert_statement( + client: &mut Client, + config: &AccountsDbPluginPostgresConfig, + ) -> Result { + let stmt = + "INSERT INTO block (slot, blockhash, rewards, block_time, block_height, updated_on) \ + VALUES ($1, $2, $3, $4, $5, $6)"; + + let stmt = client.prepare(stmt); + + match stmt { + Err(err) => { + return Err(AccountsDbPluginError::Custom(Box::new(AccountsDbPluginPostgresError::DataSchemaError { + msg: format!( + "Error in preparing for the block metadata update PostgreSQL database: ({}) host: {:?} user: {:?} config: {:?}", + err, config.host, config.user, config + ), + }))); + } + Ok(stmt) => Ok(stmt), + } + } + + pub(crate) fn update_block_metadata_impl( + &mut self, + block_info: UpdateBlockMetadataRequest, + ) -> Result<(), AccountsDbPluginError> { + let client = self.client.get_mut().unwrap(); + let statement = &client.update_block_metadata_stmt; + let client = &mut client.client; + let updated_on = Utc::now().naive_utc(); + + let block_info = block_info.block_info; + let result = client.query( + statement, + &[ + &block_info.slot, + &block_info.blockhash, + &block_info.rewards, + &block_info.block_time, + &block_info.block_height, + &updated_on, + ], + ); + + if let Err(err) = result { + let msg = format!( + "Failed to persist the update of block metadata to the PostgreSQL database. 
Error: {:?}", + err); + error!("{}", msg); + return Err(AccountsDbPluginError::AccountsUpdateError { msg }); + } + + Ok(()) + } +} diff --git a/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs b/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs index bdc30b158c83ad..37112b0b10f1f0 100644 --- a/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs +++ b/accountsdb-plugin-postgres/src/postgres_client/postgres_client_transaction.rs @@ -18,8 +18,8 @@ use { solana_sdk::{ instruction::CompiledInstruction, message::{ - v0::{self, AddressMapIndexes}, - MappedAddresses, MappedMessage, Message, MessageHeader, SanitizedMessage, + v0::{self, LoadedAddresses, MessageAddressTableLookup}, + Message, MessageHeader, SanitizedMessage, }, transaction::TransactionError, }, @@ -105,10 +105,11 @@ pub struct DbTransactionMessage { } #[derive(Clone, Debug, ToSql)] -#[postgres(name = "AddressMapIndexes")] -pub struct DbAddressMapIndexes { - pub writable: Vec, - pub readonly: Vec, +#[postgres(name = "TransactionMessageAddressTableLookup")] +pub struct DbTransactionMessageAddressTableLookup { + pub account_key: Vec, + pub writable_indexes: Vec, + pub readonly_indexes: Vec, } #[derive(Clone, Debug, ToSql)] @@ -118,21 +119,21 @@ pub struct DbTransactionMessageV0 { pub account_keys: Vec>, pub recent_blockhash: Vec, pub instructions: Vec, - pub address_map_indexes: Vec, + pub address_table_lookups: Vec, } #[derive(Clone, Debug, ToSql)] -#[postgres(name = "MappedAddresses")] -pub struct DbMappedAddresses { +#[postgres(name = "LoadedAddresses")] +pub struct DbLoadedAddresses { pub writable: Vec>, pub readonly: Vec>, } #[derive(Clone, Debug, ToSql)] -#[postgres(name = "MappedMessage")] -pub struct DbMappedMessage { +#[postgres(name = "LoadedMessageV0")] +pub struct DbLoadedMessageV0 { pub message: DbTransactionMessageV0, - pub mapped_addresses: DbMappedAddresses, + pub loaded_addresses: DbLoadedAddresses, } pub struct 
DbTransaction { @@ -141,7 +142,7 @@ pub struct DbTransaction { pub slot: i64, pub message_type: i16, pub legacy_message: Option, - pub v0_mapped_message: Option, + pub v0_loaded_message: Option, pub message_hash: Vec, pub meta: DbTransactionStatusMeta, pub signatures: Vec>, @@ -151,32 +152,33 @@ pub struct LogTransactionRequest { pub transaction_info: DbTransaction, } -impl From<&AddressMapIndexes> for DbAddressMapIndexes { - fn from(address_map_indexes: &AddressMapIndexes) -> Self { +impl From<&MessageAddressTableLookup> for DbTransactionMessageAddressTableLookup { + fn from(address_table_lookup: &MessageAddressTableLookup) -> Self { Self { - writable: address_map_indexes - .writable + account_key: address_table_lookup.account_key.as_ref().to_vec(), + writable_indexes: address_table_lookup + .writable_indexes .iter() - .map(|address_idx| *address_idx as i16) + .map(|idx| *idx as i16) .collect(), - readonly: address_map_indexes - .readonly + readonly_indexes: address_table_lookup + .readonly_indexes .iter() - .map(|address_idx| *address_idx as i16) + .map(|idx| *idx as i16) .collect(), } } } -impl From<&MappedAddresses> for DbMappedAddresses { - fn from(mapped_addresses: &MappedAddresses) -> Self { +impl From<&LoadedAddresses> for DbLoadedAddresses { + fn from(loaded_addresses: &LoadedAddresses) -> Self { Self { - writable: mapped_addresses + writable: loaded_addresses .writable .iter() .map(|pubkey| pubkey.as_ref().to_vec()) .collect(), - readonly: mapped_addresses + readonly: loaded_addresses .readonly .iter() .map(|pubkey| pubkey.as_ref().to_vec()) @@ -243,20 +245,20 @@ impl From<&v0::Message> for DbTransactionMessageV0 { .iter() .map(DbCompiledInstruction::from) .collect(), - address_map_indexes: message - .address_map_indexes + address_table_lookups: message + .address_table_lookups .iter() - .map(DbAddressMapIndexes::from) + .map(DbTransactionMessageAddressTableLookup::from) .collect(), } } } -impl From<&MappedMessage> for DbMappedMessage { - fn from(message: 
&MappedMessage) -> Self { +impl From<&v0::LoadedMessage> for DbLoadedMessageV0 { + fn from(message: &v0::LoadedMessage) -> Self { Self { message: DbTransactionMessageV0::from(&message.message), - mapped_addresses: DbMappedAddresses::from(&message.mapped_addresses), + loaded_addresses: DbLoadedAddresses::from(&message.loaded_addresses), } } } @@ -328,6 +330,8 @@ pub enum DbTransactionErrorCode { WouldExceedMaxBlockCostLimit, UnsupportedVersion, InvalidWritableAccount, + WouldExceedMaxAccountDataCostLimit, + TooManyAccountLocks, } impl From<&TransactionError> for DbTransactionErrorCode { @@ -356,6 +360,10 @@ impl From<&TransactionError> for DbTransactionErrorCode { TransactionError::WouldExceedMaxBlockCostLimit => Self::WouldExceedMaxBlockCostLimit, TransactionError::UnsupportedVersion => Self::UnsupportedVersion, TransactionError::InvalidWritableAccount => Self::InvalidWritableAccount, + TransactionError::WouldExceedMaxAccountDataCostLimit => { + Self::WouldExceedMaxAccountDataCostLimit + } + TransactionError::TooManyAccountLocks => Self::TooManyAccountLocks, } } } @@ -460,8 +468,8 @@ fn build_db_transaction(slot: u64, transaction_info: &ReplicaTransactionInfo) -> } _ => None, }, - v0_mapped_message: match transaction_info.transaction.message() { - SanitizedMessage::V0(mapped_message) => Some(DbMappedMessage::from(mapped_message)), + v0_loaded_message: match transaction_info.transaction.message() { + SanitizedMessage::V0(loaded_message) => Some(DbLoadedMessageV0::from(loaded_message)), _ => None, }, signatures: transaction_info @@ -485,8 +493,16 @@ impl SimplePostgresClient { config: &AccountsDbPluginPostgresConfig, ) -> Result { let stmt = "INSERT INTO transaction AS txn (signature, is_vote, slot, message_type, legacy_message, \ - v0_mapped_message, signatures, message_hash, meta, updated_on) \ - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)"; + v0_loaded_message, signatures, message_hash, meta, updated_on) \ + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) \ 
+ ON CONFLICT (slot, signature) DO UPDATE SET is_vote=excluded.is_vote, \ + message_type=excluded.message_type, \ + legacy_message=excluded.legacy_message, \ + v0_loaded_message=excluded.v0_loaded_message, \ + signatures=excluded.signatures, \ + message_hash=excluded.message_hash, \ + meta=excluded.meta, \ + updated_on=excluded.updated_on"; let stmt = client.prepare(stmt); @@ -521,7 +537,7 @@ impl SimplePostgresClient { &transaction_info.slot, &transaction_info.message_type, &transaction_info.legacy_message, - &transaction_info.v0_mapped_message, + &transaction_info.v0_loaded_message, &transaction_info.signatures, &transaction_info.message_hash, &transaction_info.meta, @@ -670,42 +686,44 @@ pub(crate) mod tests { check_inner_instructions_equality(&inner_instructions, &db_inner_instructions); } - fn check_address_map_indexes_equality( - address_map_indexes: &AddressMapIndexes, - db_address_map_indexes: &DbAddressMapIndexes, + fn check_address_table_lookups_equality( + address_table_lookups: &MessageAddressTableLookup, + db_address_table_lookups: &DbTransactionMessageAddressTableLookup, ) { assert_eq!( - address_map_indexes.writable.len(), - db_address_map_indexes.writable.len() + address_table_lookups.writable_indexes.len(), + db_address_table_lookups.writable_indexes.len() ); assert_eq!( - address_map_indexes.readonly.len(), - db_address_map_indexes.readonly.len() + address_table_lookups.readonly_indexes.len(), + db_address_table_lookups.readonly_indexes.len() ); - for i in 0..address_map_indexes.writable.len() { + for i in 0..address_table_lookups.writable_indexes.len() { assert_eq!( - address_map_indexes.writable[i], - db_address_map_indexes.writable[i] as u8 + address_table_lookups.writable_indexes[i], + db_address_table_lookups.writable_indexes[i] as u8 ) } - for i in 0..address_map_indexes.readonly.len() { + for i in 0..address_table_lookups.readonly_indexes.len() { assert_eq!( - address_map_indexes.readonly[i], - db_address_map_indexes.readonly[i] as u8 + 
address_table_lookups.readonly_indexes[i], + db_address_table_lookups.readonly_indexes[i] as u8 ) } } #[test] - fn test_transform_address_map_indexes() { - let address_map_indexes = AddressMapIndexes { - writable: vec![1, 2, 3], - readonly: vec![4, 5, 6], + fn test_transform_address_table_lookups() { + let address_table_lookups = MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![4, 5, 6], }; - let db_address_map_indexes = DbAddressMapIndexes::from(&address_map_indexes); - check_address_map_indexes_equality(&address_map_indexes, &db_address_map_indexes); + let db_address_table_lookups = + DbTransactionMessageAddressTableLookup::from(&address_table_lookups); + check_address_table_lookups_equality(&address_table_lookups, &db_address_table_lookups); } fn check_reward_equality(reward: &Reward, db_reward: &DbReward) { @@ -1089,7 +1107,7 @@ pub(crate) mod tests { check_transaction_message_equality(&message, &db_message); } - fn check_transaction_messagev0_equality( + fn check_transaction_message_v0_equality( message: &v0::Message, db_message: &DbTransactionMessageV0, ) { @@ -1106,18 +1124,18 @@ pub(crate) mod tests { ); } assert_eq!( - message.address_map_indexes.len(), - db_message.address_map_indexes.len() + message.address_table_lookups.len(), + db_message.address_table_lookups.len() ); - for i in 0..message.address_map_indexes.len() { - check_address_map_indexes_equality( - &message.address_map_indexes[i], - &db_message.address_map_indexes[i], + for i in 0..message.address_table_lookups.len() { + check_address_table_lookups_equality( + &message.address_table_lookups[i], + &db_message.address_table_lookups[i], ); } } - fn build_transaction_messagev0() -> v0::Message { + fn build_transaction_message_v0() -> v0::Message { v0::Message { header: MessageHeader { num_readonly_signed_accounts: 2, @@ -1144,71 +1162,76 @@ pub(crate) mod tests { data: vec![14, 15, 16], }, ], - address_map_indexes: vec![ - 
AddressMapIndexes { - writable: vec![0], - readonly: vec![1, 2], + address_table_lookups: vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![1, 2], }, - AddressMapIndexes { - writable: vec![1], - readonly: vec![0, 2], + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1], + readonly_indexes: vec![0, 2], }, ], } } #[test] - fn test_transform_transaction_messagev0() { - let message = build_transaction_messagev0(); + fn test_transform_transaction_message_v0() { + let message = build_transaction_message_v0(); let db_message = DbTransactionMessageV0::from(&message); - check_transaction_messagev0_equality(&message, &db_message); + check_transaction_message_v0_equality(&message, &db_message); } - fn check_mapped_addresses( - mapped_addresses: &MappedAddresses, - db_mapped_addresses: &DbMappedAddresses, + fn check_loaded_addresses( + loaded_addresses: &LoadedAddresses, + db_loaded_addresses: &DbLoadedAddresses, ) { assert_eq!( - mapped_addresses.writable.len(), - db_mapped_addresses.writable.len() + loaded_addresses.writable.len(), + db_loaded_addresses.writable.len() ); - for i in 0..mapped_addresses.writable.len() { + for i in 0..loaded_addresses.writable.len() { assert_eq!( - mapped_addresses.writable[i].as_ref(), - db_mapped_addresses.writable[i] + loaded_addresses.writable[i].as_ref(), + db_loaded_addresses.writable[i] ); } assert_eq!( - mapped_addresses.readonly.len(), - db_mapped_addresses.readonly.len() + loaded_addresses.readonly.len(), + db_loaded_addresses.readonly.len() ); - for i in 0..mapped_addresses.readonly.len() { + for i in 0..loaded_addresses.readonly.len() { assert_eq!( - mapped_addresses.readonly[i].as_ref(), - db_mapped_addresses.readonly[i] + loaded_addresses.readonly[i].as_ref(), + db_loaded_addresses.readonly[i] ); } } - fn check_mapped_message_equality(message: &MappedMessage, db_message: &DbMappedMessage) { - 
check_transaction_messagev0_equality(&message.message, &db_message.message); - check_mapped_addresses(&message.mapped_addresses, &db_message.mapped_addresses); + fn check_loaded_message_v0_equality( + message: &v0::LoadedMessage, + db_message: &DbLoadedMessageV0, + ) { + check_transaction_message_v0_equality(&message.message, &db_message.message); + check_loaded_addresses(&message.loaded_addresses, &db_message.loaded_addresses); } #[test] - fn test_transform_mapped_message() { - let message = MappedMessage { - message: build_transaction_messagev0(), - mapped_addresses: MappedAddresses { + fn test_transform_loaded_message_v0() { + let message = v0::LoadedMessage { + message: build_transaction_message_v0(), + loaded_addresses: LoadedAddresses { writable: vec![Pubkey::new_unique(), Pubkey::new_unique()], readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()], }, }; - let db_message = DbMappedMessage::from(&message); - check_mapped_message_equality(&message, &db_message); + let db_message = DbLoadedMessageV0::from(&message); + check_loaded_message_v0_equality(&message, &db_message); } fn check_transaction( @@ -1229,9 +1252,9 @@ pub(crate) mod tests { } SanitizedMessage::V0(message) => { assert_eq!(db_transaction.message_type, 1); - check_mapped_message_equality( + check_loaded_message_v0_equality( message, - db_transaction.v0_mapped_message.as_ref().unwrap(), + db_transaction.v0_loaded_message.as_ref().unwrap(), ); } } @@ -1298,7 +1321,7 @@ pub(crate) mod tests { Signature::new(&[2u8; 64]), Signature::new(&[3u8; 64]), ], - message: VersionedMessage::V0(build_transaction_messagev0()), + message: VersionedMessage::V0(build_transaction_message_v0()), } } @@ -1313,7 +1336,7 @@ pub(crate) mod tests { let transaction = SanitizedTransaction::try_create(transaction, message_hash, Some(true), |_message| { - Ok(MappedAddresses { + Ok(LoadedAddresses { writable: vec![Pubkey::new_unique(), Pubkey::new_unique()], readonly: vec![Pubkey::new_unique(), Pubkey::new_unique()], }) 
diff --git a/banking-bench/Cargo.toml b/banking-bench/Cargo.toml index 6bd682183740ec..2ef606ab7dfbbd 100644 --- a/banking-bench/Cargo.toml +++ b/banking-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-banking-bench" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,17 +14,17 @@ crossbeam-channel = "0.5" log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" -solana-core = { path = "../core", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-poh = { path = "../poh", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-poh = { path = "../poh", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/banking-bench/src/main.rs b/banking-bench/src/main.rs index 24ba546676bb56..3d2f57f4bcf46f 100644 --- 
a/banking-bench/src/main.rs +++ b/banking-bench/src/main.rs @@ -13,7 +13,7 @@ use { get_tmp_ledger_path, }, solana_measure::measure::Measure, - solana_perf::packet::to_packets_chunked, + solana_perf::packet::to_packet_batches, solana_poh::poh_recorder::{create_test_recorder, PohRecorder, WorkingBankEntry}, solana_runtime::{ accounts_background_service::AbsRequestSender, bank::Bank, bank_forks::BankForks, @@ -212,7 +212,7 @@ fn main() { bank.clear_signatures(); } - let mut verified: Vec<_> = to_packets_chunked(&transactions, packets_per_chunk); + let mut verified: Vec<_> = to_packet_batches(&transactions, packets_per_chunk); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new( @@ -364,7 +364,7 @@ fn main() { let sig: Vec = (0..64).map(|_| thread_rng().gen::()).collect(); tx.signatures[0] = Signature::new(&sig[0..64]); } - verified = to_packets_chunked(&transactions.clone(), packets_per_chunk); + verified = to_packet_batches(&transactions.clone(), packets_per_chunk); } start += chunk_len; diff --git a/banks-client/Cargo.toml b/banks-client/Cargo.toml index 3dfc7249f5a15b..fc01d0df16e184 100644 --- a/banks-client/Cargo.toml +++ b/banks-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-client" -version = "1.9.0" +version = "1.9.4" description = "Solana banks client" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,16 +12,17 @@ edition = "2021" [dependencies] borsh = "0.9.1" futures = "0.3" -solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" } -solana-program = { path = "../sdk/program", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -tarpc = { version = "0.26.2", features = ["full"] } +solana-banks-interface = { path = "../banks-interface", version = "=1.9.4" } +solana-program = { path = "../sdk/program", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +tarpc = { version = "0.27.2", features = ["full"] } +thiserror 
= "1.0" tokio = { version = "1", features = ["full"] } tokio-serde = { version = "0.8", features = ["bincode"] } [dev-dependencies] -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-banks-server = { path = "../banks-server", version = "=1.9.0" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-banks-server = { path = "../banks-server", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/banks-client/src/error.rs b/banks-client/src/error.rs new file mode 100644 index 00000000000000..6f27f3ce5f3740 --- /dev/null +++ b/banks-client/src/error.rs @@ -0,0 +1,73 @@ +use { + solana_sdk::{transaction::TransactionError, transport::TransportError}, + std::io, + tarpc::client::RpcError, + thiserror::Error, +}; + +/// Errors from BanksClient +#[derive(Error, Debug)] +pub enum BanksClientError { + #[error("client error: {0}")] + ClientError(&'static str), + + #[error(transparent)] + Io(#[from] io::Error), + + #[error(transparent)] + RpcError(#[from] RpcError), + + #[error("transport transaction error: {0}")] + TransactionError(#[from] TransactionError), + + #[error("simulation error: {err:?}, logs: {logs:?}, units_consumed: {units_consumed:?}")] + SimulationError { + err: TransactionError, + logs: Vec, + units_consumed: u64, + }, +} + +impl BanksClientError { + pub fn unwrap(&self) -> TransactionError { + match self { + BanksClientError::TransactionError(err) + | BanksClientError::SimulationError { err, .. } => err.clone(), + _ => panic!("unexpected transport error"), + } + } +} + +impl From for io::Error { + fn from(err: BanksClientError) -> Self { + match err { + BanksClientError::ClientError(err) => Self::new(io::ErrorKind::Other, err.to_string()), + BanksClientError::Io(err) => err, + BanksClientError::RpcError(err) => Self::new(io::ErrorKind::Other, err.to_string()), + BanksClientError::TransactionError(err) => { + Self::new(io::ErrorKind::Other, err.to_string()) + } + BanksClientError::SimulationError { err, .. 
} => { + Self::new(io::ErrorKind::Other, err.to_string()) + } + } + } +} + +impl From for TransportError { + fn from(err: BanksClientError) -> Self { + match err { + BanksClientError::ClientError(err) => { + Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string())) + } + BanksClientError::Io(err) => { + Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string())) + } + BanksClientError::RpcError(err) => { + Self::IoError(io::Error::new(io::ErrorKind::Other, err.to_string())) + } + BanksClientError::TransactionError(err) => Self::TransactionError(err), + BanksClientError::SimulationError { err, .. } => Self::TransactionError(err), + } + } +} diff --git a/banks-client/src/lib.rs b/banks-client/src/lib.rs index 1623705176ed00..f1ab2729318f17 100644 --- a/banks-client/src/lib.rs +++ b/banks-client/src/lib.rs @@ -7,9 +7,10 @@ pub use solana_banks_interface::{BanksClient as TarpcClient, TransactionStatus}; use { + crate::error::BanksClientError, borsh::BorshDeserialize, - futures::{future::join_all, Future, FutureExt}, - solana_banks_interface::{BanksRequest, BanksResponse}, + futures::{future::join_all, Future, FutureExt, TryFutureExt}, + solana_banks_interface::{BanksRequest, BanksResponse, BanksTransactionResultWithSimulation}, solana_program::{ clock::Slot, fee_calculator::FeeCalculator, hash::Hash, program_pack::Pack, pubkey::Pubkey, rent::Rent, sysvar::Sysvar, @@ -22,7 +23,7 @@ use { transaction::{self, Transaction}, transport, }, - std::io::{self, Error, ErrorKind}, + std::io, tarpc::{ client::{self, NewClient, RequestDispatch}, context::{self, Context}, @@ -33,6 +34,8 @@ use { tokio_serde::formats::Bincode, }; +mod error; + // This exists only for backward compatibility pub trait BanksClientExt {} @@ -58,7 +61,10 @@ impl BanksClient { ctx: Context, transaction: Transaction, ) -> impl Future> + '_ { - self.inner.send_transaction_with_context(ctx, transaction) + self.inner + .send_transaction_with_context(ctx, transaction) + 
.map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } #[deprecated( @@ -73,6 +79,8 @@ impl BanksClient { #[allow(deprecated)] self.inner .get_fees_with_commitment_and_context(ctx, commitment) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } pub fn get_transaction_status_with_context( @@ -82,6 +90,8 @@ impl BanksClient { ) -> impl Future>> + '_ { self.inner .get_transaction_status_with_context(ctx, signature) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } pub fn get_slot_with_context( @@ -89,7 +99,10 @@ impl BanksClient { ctx: Context, commitment: CommitmentLevel, ) -> impl Future> + '_ { - self.inner.get_slot_with_context(ctx, commitment) + self.inner + .get_slot_with_context(ctx, commitment) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } pub fn get_block_height_with_context( @@ -97,7 +110,10 @@ impl BanksClient { ctx: Context, commitment: CommitmentLevel, ) -> impl Future> + '_ { - self.inner.get_block_height_with_context(ctx, commitment) + self.inner + .get_block_height_with_context(ctx, commitment) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } pub fn process_transaction_with_commitment_and_context( @@ -108,6 +124,24 @@ impl BanksClient { ) -> impl Future>>> + '_ { self.inner .process_transaction_with_commitment_and_context(ctx, transaction, commitment) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) + } + + pub fn process_transaction_with_preflight_and_commitment_and_context( + &mut self, + ctx: Context, + transaction: Transaction, + commitment: CommitmentLevel, + ) -> impl Future> + '_ + { + self.inner + 
.process_transaction_with_preflight_and_commitment_and_context( + ctx, + transaction, + commitment, + ) + .map_err(Into::into) } pub fn get_account_with_commitment_and_context( @@ -118,6 +152,8 @@ impl BanksClient { ) -> impl Future>> + '_ { self.inner .get_account_with_commitment_and_context(ctx, address, commitment) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } /// Send a transaction and return immediately. The server will resend the @@ -148,9 +184,13 @@ impl BanksClient { pub fn get_sysvar(&mut self) -> impl Future> + '_ { self.get_account(T::id()).map(|result| { let sysvar = result? - .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Sysvar not present"))?; + .ok_or(BanksClientError::ClientError("Sysvar not present")) + .map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError from_account::(&sysvar) - .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Failed to deserialize sysvar")) + .ok_or(BanksClientError::ClientError( + "Failed to deserialize sysvar", + )) + .map_err(Into::into) // Remove this when return Err type updated to BanksClientError }) } @@ -164,7 +204,8 @@ impl BanksClient { /// method to get both a blockhash and the blockhash's last valid slot. #[deprecated(since = "1.9.0", note = "Please use `get_latest_blockhash` instead")] pub fn get_recent_blockhash(&mut self) -> impl Future> + '_ { - self.get_latest_blockhash() + #[allow(deprecated)] + self.get_fees().map(|result| Ok(result?.1)) } /// Send a transaction and return after the transaction has been rejected or @@ -178,11 +219,60 @@ impl BanksClient { ctx.deadline += Duration::from_secs(50); self.process_transaction_with_commitment_and_context(ctx, transaction, commitment) .map(|result| match result? 
{ - None => { - Err(Error::new(ErrorKind::TimedOut, "invalid blockhash or fee-payer").into()) - } + None => Err(BanksClientError::ClientError( + "invalid blockhash or fee-payer", + )), Some(transaction_result) => Ok(transaction_result?), }) + .map_err(Into::into) // Remove this when return Err type updated to BanksClientError + } + + /// Send a transaction and return any preflight (sanitization or simulation) errors, or return + /// after the transaction has been rejected or reached the given level of commitment. + pub fn process_transaction_with_preflight_and_commitment( + &mut self, + transaction: Transaction, + commitment: CommitmentLevel, + ) -> impl Future> + '_ { + let mut ctx = context::current(); + ctx.deadline += Duration::from_secs(50); + self.process_transaction_with_preflight_and_commitment_and_context( + ctx, + transaction, + commitment, + ) + .map(|result| match result? { + BanksTransactionResultWithSimulation { + result: None, + simulation_details: _, + } => Err(BanksClientError::ClientError( + "invalid blockhash or fee-payer", + )), + BanksTransactionResultWithSimulation { + result: Some(Err(err)), + simulation_details: Some(simulation_details), + } => Err(BanksClientError::SimulationError { + err, + logs: simulation_details.logs, + units_consumed: simulation_details.units_consumed, + }), + BanksTransactionResultWithSimulation { + result: Some(result), + simulation_details: _, + } => result.map_err(Into::into), + }) + } + + /// Send a transaction and return any preflight (sanitization or simulation) errors, or return + /// after the transaction has been finalized or rejected. + pub fn process_transaction_with_preflight( + &mut self, + transaction: Transaction, + ) -> impl Future> + '_ { + self.process_transaction_with_preflight_and_commitment( + transaction, + CommitmentLevel::default(), + ) } /// Send a transaction and return until the transaction has been finalized or rejected. 
@@ -255,10 +345,12 @@ impl BanksClient { address: Pubkey, ) -> impl Future> + '_ { self.get_account(address).map(|result| { - let account = - result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "Account not found"))?; + let account = result? + .ok_or(BanksClientError::ClientError("Account not found")) + .map_err(io::Error::from)?; // Remove this map when return Err type updated to BanksClientError T::unpack_from_slice(&account.data) - .map_err(|_| io::Error::new(io::ErrorKind::Other, "Failed to deserialize account")) + .map_err(|_| BanksClientError::ClientError("Failed to deserialize account")) + .map_err(Into::into) // Remove this when return Err type updated to BanksClientError }) } @@ -269,9 +361,8 @@ impl BanksClient { address: Pubkey, ) -> impl Future> + '_ { self.get_account(address).map(|result| { - let account = - result?.ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found"))?; - T::try_from_slice(&account.data) + let account = result?.ok_or(BanksClientError::ClientError("Account not found"))?; + T::try_from_slice(&account.data).map_err(Into::into) }) } @@ -330,7 +421,8 @@ impl BanksClient { .map(|result| { result? 
.map(|x| x.0) - .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "account not found")) + .ok_or(BanksClientError::ClientError("valid blockhash not found")) + .map_err(Into::into) }) } @@ -348,6 +440,8 @@ impl BanksClient { ) -> impl Future>> + '_ { self.inner .get_latest_blockhash_with_commitment_and_context(ctx, commitment) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } pub fn get_fee_for_message_with_commitment_and_context( @@ -358,6 +452,8 @@ impl BanksClient { ) -> impl Future>> + '_ { self.inner .get_fee_for_message_with_commitment_and_context(ctx, commitment, message) + .map_err(BanksClientError::from) // Remove this when return Err type updated to BanksClientError + .map_err(Into::into) } } @@ -399,7 +495,7 @@ mod tests { } #[test] - fn test_banks_server_transfer_via_server() -> io::Result<()> { + fn test_banks_server_transfer_via_server() -> Result<(), BanksClientError> { // This test shows the preferred way to interact with BanksServer. // It creates a runtime explicitly (no globals via tokio macros) and calls // `runtime.block_on()` just once, to run all the async code. @@ -432,7 +528,7 @@ mod tests { } #[test] - fn test_banks_server_transfer_via_client() -> io::Result<()> { + fn test_banks_server_transfer_via_client() -> Result<(), BanksClientError> { // The caller may not want to hold the connection open until the transaction // is processed (or blockhash expires). In this test, we verify the // server-side functionality is available to the client. 
diff --git a/banks-interface/Cargo.toml b/banks-interface/Cargo.toml index bb16fa2ea4e5f6..68f3e682f0d5ec 100644 --- a/banks-interface/Cargo.toml +++ b/banks-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-interface" -version = "1.9.0" +version = "1.9.4" description = "Solana banks RPC interface" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,8 +11,8 @@ edition = "2021" [dependencies] serde = { version = "1.0.130", features = ["derive"] } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -tarpc = { version = "0.26.2", features = ["full"] } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +tarpc = { version = "0.27.2", features = ["full"] } [lib] crate-type = ["lib"] diff --git a/banks-interface/src/lib.rs b/banks-interface/src/lib.rs index ad2ff1ab488abf..597cf60167dd2c 100644 --- a/banks-interface/src/lib.rs +++ b/banks-interface/src/lib.rs @@ -30,6 +30,19 @@ pub struct TransactionStatus { pub confirmation_status: Option, } +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct TransactionSimulationDetails { + pub logs: Vec, + pub units_consumed: u64, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct BanksTransactionResultWithSimulation { + pub result: Option>, + pub simulation_details: Option, +} + #[tarpc::service] pub trait Banks { async fn send_transaction_with_context(transaction: Transaction); @@ -44,6 +57,10 @@ pub trait Banks { -> Option; async fn get_slot_with_context(commitment: CommitmentLevel) -> Slot; async fn get_block_height_with_context(commitment: CommitmentLevel) -> u64; + async fn process_transaction_with_preflight_and_commitment_and_context( + transaction: Transaction, + commitment: CommitmentLevel, + ) -> BanksTransactionResultWithSimulation; async fn process_transaction_with_commitment_and_context( transaction: Transaction, commitment: CommitmentLevel, diff --git a/banks-server/Cargo.toml 
b/banks-server/Cargo.toml index 070e79aa1b3b36..402ff7ebf6e6ff 100644 --- a/banks-server/Cargo.toml +++ b/banks-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-banks-server" -version = "1.9.0" +version = "1.9.4" description = "Solana banks server" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,11 +12,11 @@ edition = "2021" [dependencies] bincode = "1.3.3" futures = "0.3" -solana-banks-interface = { path = "../banks-interface", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" } -tarpc = { version = "0.26.2", features = ["full"] } +solana-banks-interface = { path = "../banks-interface", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" } +tarpc = { version = "0.27.2", features = ["full"] } tokio = { version = "1", features = ["full"] } tokio-serde = { version = "0.8", features = ["bincode"] } tokio-stream = "0.1" diff --git a/banks-server/src/banks_server.rs b/banks-server/src/banks_server.rs index d510cf59953126..6e94d244016b48 100644 --- a/banks-server/src/banks_server.rs +++ b/banks-server/src/banks_server.rs @@ -2,9 +2,14 @@ use { bincode::{deserialize, serialize}, futures::{future, prelude::stream::StreamExt}, solana_banks_interface::{ - Banks, BanksRequest, BanksResponse, TransactionConfirmationStatus, TransactionStatus, + Banks, BanksRequest, BanksResponse, BanksTransactionResultWithSimulation, + TransactionConfirmationStatus, TransactionSimulationDetails, TransactionStatus, + }, + solana_runtime::{ + bank::{Bank, TransactionSimulationResult}, + bank_forks::BankForks, + commitment::BlockCommitmentCache, }, - solana_runtime::{bank::Bank, 
bank_forks::BankForks, commitment::BlockCommitmentCache}, solana_sdk::{ account::Account, clock::Slot, @@ -15,7 +20,7 @@ use { message::{Message, SanitizedMessage}, pubkey::Pubkey, signature::Signature, - transaction::{self, Transaction}, + transaction::{self, SanitizedTransaction, Transaction}, }, solana_send_transaction_service::{ send_transaction_service::{SendTransactionService, TransactionInfo}, @@ -35,7 +40,7 @@ use { tarpc::{ context::Context, serde_transport::tcp, - server::{self, Channel, Incoming}, + server::{self, incoming::Incoming, Channel}, transport::{self, channel::UnboundedChannel}, ClientMessage, Response, }, @@ -242,6 +247,47 @@ impl Banks for BanksServer { self.bank(commitment).block_height() } + async fn process_transaction_with_preflight_and_commitment_and_context( + self, + ctx: Context, + transaction: Transaction, + commitment: CommitmentLevel, + ) -> BanksTransactionResultWithSimulation { + let sanitized_transaction = + match SanitizedTransaction::try_from_legacy_transaction(transaction.clone()) { + Err(err) => { + return BanksTransactionResultWithSimulation { + result: Some(Err(err)), + simulation_details: None, + }; + } + Ok(tx) => tx, + }; + if let TransactionSimulationResult { + result: Err(err), + logs, + post_simulation_accounts: _, + units_consumed, + } = self + .bank(commitment) + .simulate_transaction_unchecked(sanitized_transaction) + { + return BanksTransactionResultWithSimulation { + result: Some(Err(err)), + simulation_details: Some(TransactionSimulationDetails { + logs, + units_consumed, + }), + }; + } + BanksTransactionResultWithSimulation { + result: self + .process_transaction_with_commitment_and_context(ctx, transaction, commitment) + .await, + simulation_details: None, + } + } + async fn process_transaction_with_commitment_and_context( self, _: Context, diff --git a/bench-streamer/Cargo.toml b/bench-streamer/Cargo.toml index af18cb8f32aa33..f6fcc1ff48b81d 100644 --- a/bench-streamer/Cargo.toml +++ 
b/bench-streamer/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-bench-streamer" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,11 +10,11 @@ publish = false [dependencies] clap = "2.33.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bench-streamer/src/main.rs b/bench-streamer/src/main.rs index bade7a943093ae..46eeeb761380e2 100644 --- a/bench-streamer/src/main.rs +++ b/bench-streamer/src/main.rs @@ -2,8 +2,8 @@ use { clap::{crate_description, crate_name, App, Arg}, solana_streamer::{ - packet::{Packet, Packets, PacketsRecycler, PACKET_DATA_SIZE}, - streamer::{receiver, PacketReceiver}, + packet::{Packet, PacketBatch, PacketBatchRecycler, PACKET_DATA_SIZE}, + streamer::{receiver, PacketBatchReceiver}, }, std::{ cmp::max, @@ -20,19 +20,19 @@ use { fn producer(addr: &SocketAddr, exit: Arc) -> JoinHandle<()> { let send = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut msgs = Packets::default(); - msgs.packets.resize(10, Packet::default()); - for w in msgs.packets.iter_mut() { + let mut packet_batch = PacketBatch::default(); + packet_batch.packets.resize(10, Packet::default()); + for w in packet_batch.packets.iter_mut() { w.meta.size = PACKET_DATA_SIZE; 
w.meta.set_addr(addr); } - let msgs = Arc::new(msgs); + let packet_batch = Arc::new(packet_batch); spawn(move || loop { if exit.load(Ordering::Relaxed) { return; } let mut num = 0; - for p in &msgs.packets { + for p in &packet_batch.packets { let a = p.meta.addr(); assert!(p.meta.size <= PACKET_DATA_SIZE); send.send_to(&p.data[..p.meta.size], &a).unwrap(); @@ -42,14 +42,14 @@ fn producer(addr: &SocketAddr, exit: Arc) -> JoinHandle<()> { }) } -fn sink(exit: Arc, rvs: Arc, r: PacketReceiver) -> JoinHandle<()> { +fn sink(exit: Arc, rvs: Arc, r: PacketBatchReceiver) -> JoinHandle<()> { spawn(move || loop { if exit.load(Ordering::Relaxed) { return; } let timer = Duration::new(1, 0); - if let Ok(msgs) = r.recv_timeout(timer) { - rvs.fetch_add(msgs.packets.len(), Ordering::Relaxed); + if let Ok(packet_batch) = r.recv_timeout(timer) { + rvs.fetch_add(packet_batch.packets.len(), Ordering::Relaxed); } }) } @@ -81,7 +81,7 @@ fn main() -> Result<()> { let mut read_channels = Vec::new(); let mut read_threads = Vec::new(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); for _ in 0..num_sockets { let read = solana_net_utils::bind_to(ip_addr, port, false).unwrap(); read.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); diff --git a/bench-tps/Cargo.toml b/bench-tps/Cargo.toml index c215602872cc85..48f062723cdea8 100644 --- a/bench-tps/Cargo.toml +++ b/bench-tps/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-bench-tps" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,23 +14,23 @@ log = "0.4.14" rayon = "1.5.1" serde_json = "1.0.72" serde_yaml = "0.8.21" -solana-core = { path = "../core", version = "=1.9.0" } -solana-genesis = { path = "../genesis", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" 
} -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-genesis = { path = "../genesis", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [dev-dependencies] serial_test = "0.5.1" -solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" } +solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bucket_map/Cargo.toml b/bucket_map/Cargo.toml index 0dff3be4a3853b..6627c05b70c9bd 100644 --- a/bucket_map/Cargo.toml +++ b/bucket_map/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bucket-map" -version = "1.9.0" +version = "1.9.4" description = "solana-bucket-map" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-bucket-map" @@ -12,11 +12,11 @@ edition = 
"2021" [dependencies] rayon = "1.5.0" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } memmap2 = "0.5.0" log = { version = "0.4.11" } -solana-measure = { path = "../measure", version = "=1.9.0" } +solana-measure = { path = "../measure", version = "=1.9.4" } rand = "0.7.0" fs_extra = "1.2.0" tempfile = "3.2.0" diff --git a/cargo-build-bpf b/cargo-build-bpf index 5202bba4515518..642b0da85f2692 100755 --- a/cargo-build-bpf +++ b/cargo-build-bpf @@ -9,5 +9,8 @@ for a in "$@"; do fi done -set -x +set -ex +if [[ ! -f sdk/bpf/syscalls.txt ]]; then + "$here"/cargo build --manifest-path "$here"/programs/bpf_loader/gen-syscall-list/Cargo.toml +fi exec "$here"/cargo run --manifest-path "$here"/sdk/cargo-build-bpf/Cargo.toml -- $maybe_bpf_sdk "$@" diff --git a/ci/buildkite-pipeline.sh b/ci/buildkite-pipeline.sh index c289df33541462..667a30302ad1b3 100755 --- a/ci/buildkite-pipeline.sh +++ b/ci/buildkite-pipeline.sh @@ -226,6 +226,19 @@ EOF annotate --style info \ "downstream-projects skipped as no relevant files were modified" fi + + # Wasm support + if affects \ + ^ci/test-wasm.sh \ + ^ci/test-stable.sh \ + ^sdk/ \ + ; then + command_step wasm ". ci/rust-version.sh; ci/docker-run.sh \$\$rust_stable_docker_image ci/test-wasm.sh" 20 + else + annotate --style info \ + "wasm skipped as no relevant files were modified" + fi + # Benches... 
if affects \ .rs$ \ diff --git a/ci/docker-rust-nightly/Dockerfile b/ci/docker-rust-nightly/Dockerfile index 2e5bac5808818c..4bd0ee82f8eef2 100644 --- a/ci/docker-rust-nightly/Dockerfile +++ b/ci/docker-rust-nightly/Dockerfile @@ -1,4 +1,4 @@ -FROM solanalabs/rust:1.56.1 +FROM solanalabs/rust:1.57.0 ARG date RUN set -x \ diff --git a/ci/docker-rust/Dockerfile b/ci/docker-rust/Dockerfile index 5d768a81dbc7bf..5b482909d55812 100644 --- a/ci/docker-rust/Dockerfile +++ b/ci/docker-rust/Dockerfile @@ -1,6 +1,6 @@ # Note: when the rust version is changed also modify # ci/rust-version.sh to pick up the new image tag -FROM rust:1.56.1 +FROM rust:1.57.0 # Add Google Protocol Buffers for Libra's metrics library. ENV PROTOC_VERSION 3.8.0 @@ -11,6 +11,7 @@ RUN set -x \ && apt-get install apt-transport-https \ && echo deb https://apt.buildkite.com/buildkite-agent stable main > /etc/apt/sources.list.d/buildkite-agent.list \ && apt-key adv --no-tty --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 32A37959C2FA5C3C99EFBC32A79206696452D198 \ + && curl -fsSL https://deb.nodesource.com/setup_current.x | bash - \ && apt update \ && apt install -y \ buildkite-agent \ @@ -19,15 +20,20 @@ RUN set -x \ lcov \ libudev-dev \ mscgen \ + nodejs \ net-tools \ rsync \ sudo \ golang \ unzip \ \ + && apt remove -y libcurl4-openssl-dev \ && rm -rf /var/lib/apt/lists/* \ + && node --version \ + && npm --version \ && rustup component add rustfmt \ && rustup component add clippy \ + && rustup target add wasm32-unknown-unknown \ && cargo install cargo-audit \ && cargo install mdbook \ && cargo install mdbook-linkcheck \ diff --git a/ci/env.sh b/ci/env.sh index 973f4c85323bf1..8d1902e063caaa 100644 --- a/ci/env.sh +++ b/ci/env.sh @@ -23,6 +23,9 @@ if [[ -n $CI ]]; then elif [[ -n $BUILDKITE ]]; then export CI_BRANCH=$BUILDKITE_BRANCH export CI_BUILD_ID=$BUILDKITE_BUILD_ID + if [[ $BUILDKITE_COMMIT = HEAD ]]; then + BUILDKITE_COMMIT="$(git rev-parse HEAD)" + fi export CI_COMMIT=$BUILDKITE_COMMIT 
export CI_JOB_ID=$BUILDKITE_JOB_ID # The standard BUILDKITE_PULL_REQUEST environment variable is always "false" due @@ -35,7 +38,18 @@ if [[ -n $CI ]]; then export CI_BASE_BRANCH=$BUILDKITE_BRANCH export CI_PULL_REQUEST= fi - export CI_OS_NAME=linux + + case "$(uname -s)" in + Linux) + export CI_OS_NAME=linux + ;; + Darwin) + export CI_OS_NAME=osx + ;; + *) + ;; + esac + if [[ -n $BUILDKITE_TRIGGERED_FROM_BUILD_PIPELINE_SLUG ]]; then # The solana-secondary pipeline should use the slug of the pipeline that # triggered it diff --git a/ci/publish-tarball.sh b/ci/publish-tarball.sh index ec44368e9fdef0..071afeaea99007 100755 --- a/ci/publish-tarball.sh +++ b/ci/publish-tarball.sh @@ -39,7 +39,11 @@ fi case "$CI_OS_NAME" in osx) - TARGET=x86_64-apple-darwin + _cputype="$(uname -m)" + if [[ $_cputype = arm64 ]]; then + _cputype=aarch64 + fi + TARGET=${_cputype}-apple-darwin ;; linux) TARGET=x86_64-unknown-linux-gnu diff --git a/ci/rust-version.sh b/ci/rust-version.sh index 630afbac65256c..ea0eca05246fa7 100644 --- a/ci/rust-version.sh +++ b/ci/rust-version.sh @@ -18,13 +18,13 @@ if [[ -n $RUST_STABLE_VERSION ]]; then stable_version="$RUST_STABLE_VERSION" else - stable_version=1.56.1 + stable_version=1.57.0 fi if [[ -n $RUST_NIGHTLY_VERSION ]]; then nightly_version="$RUST_NIGHTLY_VERSION" else - nightly_version=2021-11-30 + nightly_version=2021-12-03 fi diff --git a/ci/test-stable.sh b/ci/test-stable.sh index 8f36b68b888233..177175e874d28a 100755 --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -103,6 +103,19 @@ test-local-cluster) _ "$cargo" stable test --release --package solana-local-cluster ${V:+--verbose} -- --nocapture --test-threads=1 exit 0 ;; +test-wasm) + _ node --version + _ npm --version + for dir in sdk/{program,}; do + if [[ -r "$dir"/package.json ]]; then + pushd "$dir" + _ npm install + _ npm test + popd + fi + done + exit 0 + ;; *) echo "Error: Unknown test: $testName" ;; diff --git a/ci/test-wasm.sh b/ci/test-wasm.sh new file mode 120000 index 
00000000000000..0c92a5c7bd6fd4 --- /dev/null +++ b/ci/test-wasm.sh @@ -0,0 +1 @@ +test-stable.sh \ No newline at end of file diff --git a/ci/upload-ci-artifact.sh b/ci/upload-ci-artifact.sh index 233c32503e154b..0bfb93c6a83512 100644 --- a/ci/upload-ci-artifact.sh +++ b/ci/upload-ci-artifact.sh @@ -19,13 +19,24 @@ upload-ci-artifact() { upload-s3-artifact() { echo "--- artifact: $1 to $2" ( - set -x - docker run \ - --rm \ - --env AWS_ACCESS_KEY_ID \ - --env AWS_SECRET_ACCESS_KEY \ - --volume "$PWD:/solana" \ - eremite/aws-cli:2018.12.18 \ + args=( + --rm + --env AWS_ACCESS_KEY_ID + --env AWS_SECRET_ACCESS_KEY + --volume "$PWD:/solana" + + ) + if [[ $(uname -m) = arm64 ]]; then + # Ref: https://blog.jaimyn.dev/how-to-build-multi-architecture-docker-images-on-an-m1-mac/#tldr + args+=( + --platform linux/amd64 + ) + fi + args+=( + eremite/aws-cli:2018.12.18 /usr/bin/s3cmd --acl-public put "$1" "$2" + ) + set -x + docker run "${args[@]}" ) } diff --git a/clap-utils/Cargo.toml b/clap-utils/Cargo.toml index 564c05ffa05312..9ab777e0714a8b 100644 --- a/clap-utils/Cargo.toml +++ b/clap-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-clap-utils" -version = "1.9.0" +version = "1.9.4" description = "Solana utilities for the clap" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,9 +12,9 @@ edition = "2021" [dependencies] clap = "2.33.0" rpassword = "5.0" -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } thiserror = "1.0.30" tiny-bip39 = "0.8.2" uriparse = "0.6.3" diff --git a/cli-config/Cargo.toml b/cli-config/Cargo.toml index f93f2263bd215a..ff17e17e2383ae 100644 --- a/cli-config/Cargo.toml +++ 
b/cli-config/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-cli-config" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" diff --git a/cli-output/Cargo.toml b/cli-output/Cargo.toml index 143d2571f00257..b883996a523691 100644 --- a/cli-output/Cargo.toml +++ b/cli-output/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-cli-output" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -19,12 +19,12 @@ Inflector = "0.11.4" indicatif = "0.16.2" serde = "1.0.130" serde_json = "1.0.72" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } [package.metadata.docs.rs] diff --git a/cli-output/src/cli_output.rs b/cli-output/src/cli_output.rs index 16f7741756b242..26a45b760678fd 100644 --- a/cli-output/src/cli_output.rs +++ b/cli-output/src/cli_output.rs @@ -99,7 +99,7 @@ impl 
OutputFormat { pub struct CliAccount { #[serde(flatten)] pub keyed_account: RpcKeyedAccount, - #[serde(skip_serializing)] + #[serde(skip_serializing, skip_deserializing)] pub use_lamports_unit: bool, } diff --git a/cli-output/src/display.rs b/cli-output/src/display.rs index 88c99f50fab990..6e2e5b13d9dd32 100644 --- a/cli-output/src/display.rs +++ b/cli-output/src/display.rs @@ -139,7 +139,7 @@ fn format_account_mode(message: &Message, index: usize) -> String { } else { "-" }, - if message.is_writable(index, /*demote_program_write_locks=*/ true) { + if message.is_writable(index) { "w" // comment for consistent rust fmt (no joking; lol) } else { "-" diff --git a/cli/Cargo.toml b/cli/Cargo.toml index e369af30a13efa..6a45274699d119 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-cli" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -26,29 +26,29 @@ semver = "1.0.4" serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-cli-output = { path = "../cli-output", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-config-program = { path = "../programs/config", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" } -solana_rbpf = "=0.2.16" -solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" } -solana-sdk = { path = 
"../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.4" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-cli-output = { path = "../cli-output", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-config-program = { path = "../programs/config", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" } +solana_rbpf = "=0.2.21" +solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0.30" tiny-bip39 = "0.8.2" [dev-dependencies] -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-test-validator = { path = "../test-validator", version = "=1.9.0" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-test-validator = { path = "../test-validator", version = "=1.9.4" } tempfile = "3.2.0" [[bin]] diff --git a/cli/src/checks.rs b/cli/src/checks.rs index e60bed798551da..d3cf03a5480abf 100644 --- a/cli/src/checks.rs +++ b/cli/src/checks.rs @@ -98,10 +98,7 @@ pub fn get_fee_for_messages( ) -> Result { Ok(messages .iter() - .map(|message| { 
- println!("msg {:?}", message.recent_blockhash); - rpc_client.get_fee_for_message(message) - }) + .map(|message| rpc_client.get_fee_for_message(message)) .collect::, _>>()? .iter() .sum()) diff --git a/cli/src/cli.rs b/cli/src/cli.rs index 85f81bc999efe1..f599fa41e2cc28 100644 --- a/cli/src/cli.rs +++ b/cli/src/cli.rs @@ -298,7 +298,13 @@ pub enum CliCommand { authorized_voter: Option, authorized_withdrawer: Pubkey, commission: u8, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option, + fee_payer: SignerIndex, }, ShowVoteAccount { pubkey: Pubkey, @@ -310,19 +316,32 @@ pub enum CliCommand { destination_account_pubkey: Pubkey, withdraw_authority: SignerIndex, withdraw_amount: SpendAmount, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option, + fee_payer: SignerIndex, }, CloseVoteAccount { vote_account_pubkey: Pubkey, destination_account_pubkey: Pubkey, withdraw_authority: SignerIndex, memo: Option, + fee_payer: SignerIndex, }, VoteAuthorize { vote_account_pubkey: Pubkey, new_authorized_pubkey: Pubkey, vote_authorize: VoteAuthorize, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option, + fee_payer: SignerIndex, authorized: SignerIndex, new_authorized: Option, }, @@ -330,13 +349,25 @@ pub enum CliCommand { vote_account_pubkey: Pubkey, new_identity_account: SignerIndex, withdraw_authority: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option, + fee_payer: SignerIndex, }, VoteUpdateCommission { vote_account_pubkey: Pubkey, commission: u8, withdraw_authority: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: 
BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option, + fee_payer: SignerIndex, }, // Wallet Commands Address, @@ -1384,7 +1415,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { authorized_voter, authorized_withdrawer, commission, + sign_only, + dump_transaction_message, + blockhash_query, + ref nonce_account, + nonce_authority, memo, + fee_payer, } => process_create_vote_account( &rpc_client, config, @@ -1394,7 +1431,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { authorized_voter, *authorized_withdrawer, *commission, + *sign_only, + *dump_transaction_message, + blockhash_query, + nonce_account.as_ref(), + *nonce_authority, memo.as_ref(), + *fee_payer, ), CliCommand::ShowVoteAccount { pubkey: vote_account_pubkey, @@ -1412,7 +1455,13 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { withdraw_authority, withdraw_amount, destination_account_pubkey, + sign_only, + dump_transaction_message, + blockhash_query, + ref nonce_account, + nonce_authority, memo, + fee_payer, } => process_withdraw_from_vote_account( &rpc_client, config, @@ -1420,13 +1469,20 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *withdraw_authority, *withdraw_amount, destination_account_pubkey, + *sign_only, + *dump_transaction_message, + blockhash_query, + nonce_account.as_ref(), + *nonce_authority, memo.as_ref(), + *fee_payer, ), CliCommand::CloseVoteAccount { vote_account_pubkey, withdraw_authority, destination_account_pubkey, memo, + fee_payer, } => process_close_vote_account( &rpc_client, config, @@ -1434,12 +1490,19 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *withdraw_authority, destination_account_pubkey, memo.as_ref(), + *fee_payer, ), CliCommand::VoteAuthorize { vote_account_pubkey, new_authorized_pubkey, vote_authorize, + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority, memo, + fee_payer, authorized, new_authorized, } => 
process_vote_authorize( @@ -1450,33 +1513,63 @@ pub fn process_command(config: &CliConfig) -> ProcessResult { *vote_authorize, *authorized, *new_authorized, + *sign_only, + *dump_transaction_message, + blockhash_query, + *nonce_account, + *nonce_authority, memo.as_ref(), + *fee_payer, ), CliCommand::VoteUpdateValidator { vote_account_pubkey, new_identity_account, withdraw_authority, + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority, memo, + fee_payer, } => process_vote_update_validator( &rpc_client, config, vote_account_pubkey, *new_identity_account, *withdraw_authority, + *sign_only, + *dump_transaction_message, + blockhash_query, + *nonce_account, + *nonce_authority, memo.as_ref(), + *fee_payer, ), CliCommand::VoteUpdateCommission { vote_account_pubkey, commission, withdraw_authority, + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority, memo, + fee_payer, } => process_vote_update_commission( &rpc_client, config, vote_account_pubkey, *commission, *withdraw_authority, + *sign_only, + *dump_transaction_message, + blockhash_query, + *nonce_account, + *nonce_authority, memo.as_ref(), + *fee_payer, ), // Wallet Commands @@ -1975,7 +2068,13 @@ mod tests { authorized_voter: Some(bob_pubkey), authorized_withdrawer: bob_pubkey, commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; config.signers = vec![&keypair, &bob_keypair, &identity_keypair]; let result = process_command(&config); @@ -2006,7 +2105,13 @@ mod tests { vote_account_pubkey: bob_pubkey, new_authorized_pubkey, vote_authorize: VoteAuthorize::Withdrawer, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, 
authorized: 0, new_authorized: None, }; @@ -2019,7 +2124,13 @@ mod tests { vote_account_pubkey: bob_pubkey, new_identity_account: 2, withdraw_authority: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; let result = process_command(&config); assert!(result.is_ok()); @@ -2195,7 +2306,13 @@ mod tests { authorized_voter: Some(bob_pubkey), authorized_withdrawer: bob_pubkey, commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; config.signers = vec![&keypair, &bob_keypair, &identity_keypair]; assert!(process_command(&config).is_err()); @@ -2204,7 +2321,13 @@ mod tests { vote_account_pubkey: bob_pubkey, new_authorized_pubkey: bob_pubkey, vote_authorize: VoteAuthorize::Voter, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 0, new_authorized: None, }; @@ -2214,7 +2337,13 @@ mod tests { vote_account_pubkey: bob_pubkey, new_identity_account: 1, withdraw_authority: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; assert!(process_command(&config).is_err()); diff --git a/cli/src/feature.rs b/cli/src/feature.rs index d2dd5b6f4458db..6b16fc2f587f7d 100644 --- a/cli/src/feature.rs +++ b/cli/src/feature.rs @@ -5,7 +5,7 @@ use { }, clap::{App, AppSettings, Arg, ArgMatches, SubCommand}, console::style, - serde::{Deserialize, Serialize}, + serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_clap_utils::{input_parsers::*, 
input_validators::*, keypair::*}, solana_cli_output::{QuietDisplay, VerboseDisplay}, solana_client::{client_error::ClientError, rpc_client::RpcClient}, @@ -23,6 +23,7 @@ use { cmp::Ordering, collections::{HashMap, HashSet}, fmt, + str::FromStr, sync::Arc, }, }; @@ -45,7 +46,7 @@ pub enum FeatureCliCommand { }, } -#[derive(Serialize, Deserialize)] +#[derive(Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase", tag = "status", content = "sinceSlot")] pub enum CliFeatureStatus { Inactive, @@ -53,7 +54,29 @@ pub enum CliFeatureStatus { Active(Slot), } -#[derive(Serialize, Deserialize)] +impl PartialOrd for CliFeatureStatus { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CliFeatureStatus { + fn cmp(&self, other: &Self) -> Ordering { + match (self, other) { + (Self::Inactive, Self::Inactive) => Ordering::Equal, + (Self::Inactive, _) => Ordering::Greater, + (_, Self::Inactive) => Ordering::Less, + (Self::Pending, Self::Pending) => Ordering::Equal, + (Self::Pending, _) => Ordering::Greater, + (_, Self::Pending) => Ordering::Less, + (Self::Active(self_active_slot), Self::Active(other_active_slot)) => { + self_active_slot.cmp(other_active_slot) + } + } + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq)] #[serde(rename_all = "camelCase")] pub struct CliFeature { pub id: String, @@ -62,11 +85,28 @@ pub struct CliFeature { pub status: CliFeatureStatus, } +impl PartialOrd for CliFeature { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for CliFeature { + fn cmp(&self, other: &Self) -> Ordering { + match self.status.cmp(&other.status) { + Ordering::Equal => self.id.cmp(&other.id), + ordering => ordering, + } + } +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct CliFeatures { pub features: Vec, pub feature_activation_allowed: bool, + #[serde(skip_serializing_if = "Option::is_none")] + pub cluster_feature_sets: Option, 
#[serde(skip)] pub inactive: bool, } @@ -93,11 +133,16 @@ impl fmt::Display for CliFeatures { CliFeatureStatus::Inactive => style("inactive".to_string()).red(), CliFeatureStatus::Pending => style("activation pending".to_string()).yellow(), CliFeatureStatus::Active(activation_slot) => - style(format!("active since slot {}", activation_slot)).green(), + style(format!("active since slot {:>9}", activation_slot)).green(), }, feature.description, )?; } + + if let Some(feature_sets) = &self.cluster_feature_sets { + write!(f, "{}", feature_sets)?; + } + if self.inactive && !self.feature_activation_allowed { writeln!( f, @@ -114,6 +159,191 @@ impl fmt::Display for CliFeatures { impl QuietDisplay for CliFeatures {} impl VerboseDisplay for CliFeatures {} +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliClusterFeatureSets { + pub tool_feature_set: u32, + pub feature_sets: Vec, + #[serde(skip)] + pub stake_allowed: bool, + #[serde(skip)] + pub rpc_allowed: bool, +} + +impl fmt::Display for CliClusterFeatureSets { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut tool_feature_set_matches_cluster = false; + + let software_versions_title = "Software Version"; + let feature_set_title = "Feature Set"; + let stake_percent_title = "Stake"; + let rpc_percent_title = "RPC"; + let mut max_software_versions_len = software_versions_title.len(); + let mut max_feature_set_len = feature_set_title.len(); + let mut max_stake_percent_len = stake_percent_title.len(); + let mut max_rpc_percent_len = rpc_percent_title.len(); + + let feature_sets: Vec<_> = self + .feature_sets + .iter() + .map(|feature_set_info| { + let me = if self.tool_feature_set == feature_set_info.feature_set { + tool_feature_set_matches_cluster = true; + true + } else { + false + }; + let software_versions: Vec<_> = feature_set_info + .software_versions + .iter() + .map(ToString::to_string) + .collect(); + let software_versions = software_versions.join(", "); + let 
feature_set = if feature_set_info.feature_set == 0 { + "unknown".to_string() + } else { + feature_set_info.feature_set.to_string() + }; + let stake_percent = format!("{:.2}%", feature_set_info.stake_percent); + let rpc_percent = format!("{:.2}%", feature_set_info.rpc_percent); + + max_software_versions_len = max_software_versions_len.max(software_versions.len()); + max_feature_set_len = max_feature_set_len.max(feature_set.len()); + max_stake_percent_len = max_stake_percent_len.max(stake_percent.len()); + max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len()); + + ( + software_versions, + feature_set, + stake_percent, + rpc_percent, + me, + ) + }) + .collect(); + + if !tool_feature_set_matches_cluster { + writeln!( + f, + "\n{}", + style("To activate features the tool and cluster feature sets must match, select a tool version that matches the cluster") + .bold())?; + } else { + if !self.stake_allowed { + write!( + f, + "\n{}", + style("To activate features the stake must be >= 95%") + .bold() + .red() + )?; + } + if !self.rpc_allowed { + write!( + f, + "\n{}", + style("To activate features the RPC nodes must be >= 95%") + .bold() + .red() + )?; + } + } + writeln!( + f, + "\n\n{}", + style(format!("Tool Feature Set: {}", self.tool_feature_set)).bold() + )?; + writeln!( + f, + "{}", + style(format!( + "{1:<0$} {3:<2$} {5:<4$} {7:<6$}", + max_software_versions_len, + software_versions_title, + max_feature_set_len, + feature_set_title, + max_stake_percent_len, + stake_percent_title, + max_rpc_percent_len, + rpc_percent_title, + )) + .bold(), + )?; + for (software_versions, feature_set, stake_percent, rpc_percent, me) in feature_sets { + writeln!( + f, + "{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}", + max_software_versions_len, + software_versions, + max_feature_set_len, + feature_set, + max_stake_percent_len, + stake_percent, + max_rpc_percent_len, + rpc_percent, + if me { "<-- me" } else { "" }, + )?; + } + writeln!(f) + } +} + +impl QuietDisplay for 
CliClusterFeatureSets {} +impl VerboseDisplay for CliClusterFeatureSets {} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliFeatureSet { + software_versions: Vec, + feature_set: u32, + stake_percent: f64, + rpc_percent: f32, +} + +#[derive(Eq, PartialEq, Ord, PartialOrd)] +struct CliVersion(Option); + +impl fmt::Display for CliVersion { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let s = match &self.0 { + None => "unknown".to_string(), + Some(version) => version.to_string(), + }; + write!(f, "{}", s) + } +} + +impl FromStr for CliVersion { + type Err = semver::Error; + fn from_str(s: &str) -> Result { + let version_option = if s == "unknown" { + None + } else { + Some(semver::Version::from_str(s)?) + }; + Ok(CliVersion(version_option)) + } +} + +impl Serialize for CliVersion { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl<'de> Deserialize<'de> for CliVersion { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s: &str = Deserialize::deserialize(deserializer)?; + CliVersion::from_str(s).map_err(serde::de::Error::custom) + } +} + pub trait FeatureSubCommands { fn feature_subcommands(self) -> Self; } @@ -330,7 +560,10 @@ fn feature_set_stats(rpc_client: &RpcClient) -> Result Result { +fn feature_activation_allowed( + rpc_client: &RpcClient, + quiet: bool, +) -> Result<(bool, Option), ClientError> { let my_feature_set = solana_version::Version::default().feature_set; let feature_set_stats = feature_set_stats(rpc_client)?; @@ -346,54 +579,43 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result= 95%") - .bold() - .red() - ); - } - if !rpc_allowed { - print!( - "\n{}", - style("To activate features the RPC nodes must be >= 95%") - .bold() - .red() - ); - } - } - println!( - "\n\n{}", - style(format!("Tool Feature Set: {}", my_feature_set)).bold() - ); - - let mut 
feature_set_stats = feature_set_stats.into_iter().collect::>(); - feature_set_stats.sort_by(|l, r| { - match l.1.software_versions[0] - .cmp(&r.1.software_versions[0]) + let cluster_feature_sets = if quiet { + None + } else { + let mut feature_sets = feature_set_stats + .into_iter() + .map( + |( + feature_set, + FeatureSetStatsEntry { + stake_percent, + rpc_nodes_percent: rpc_percent, + software_versions, + }, + )| { + CliFeatureSet { + software_versions: software_versions.into_iter().map(CliVersion).collect(), + feature_set, + stake_percent, + rpc_percent, + } + }, + ) + .collect::>(); + feature_sets.sort_by(|l, r| { + match l.software_versions[0] + .cmp(&r.software_versions[0]) .reverse() { Ordering::Equal => { match l - .1 .stake_percent - .partial_cmp(&r.1.stake_percent) + .partial_cmp(&r.stake_percent) .unwrap() .reverse() { Ordering::Equal => { - l.1.rpc_nodes_percent - .partial_cmp(&r.1.rpc_nodes_percent) - .unwrap() - .reverse() + l.rpc_percent.partial_cmp(&r.rpc_percent).unwrap().reverse() } o => o, } @@ -401,96 +623,15 @@ fn feature_activation_allowed(rpc_client: &RpcClient, quiet: bool) -> Result o, } }); + Some(CliClusterFeatureSets { + tool_feature_set: my_feature_set, + feature_sets, + stake_allowed, + rpc_allowed, + }) + }; - let software_versions_title = "Software Version"; - let feature_set_title = "Feature Set"; - let stake_percent_title = "Stake"; - let rpc_percent_title = "RPC"; - let mut stats_output = Vec::new(); - let mut max_software_versions_len = software_versions_title.len(); - let mut max_feature_set_len = feature_set_title.len(); - let mut max_stake_percent_len = stake_percent_title.len(); - let mut max_rpc_percent_len = rpc_percent_title.len(); - for ( - feature_set, - FeatureSetStatsEntry { - stake_percent, - rpc_nodes_percent, - software_versions, - }, - ) in feature_set_stats.into_iter() - { - let me = feature_set == my_feature_set; - let feature_set = if feature_set == 0 { - "unknown".to_string() - } else { - 
feature_set.to_string() - }; - let stake_percent = format!("{:.2}%", stake_percent); - let rpc_percent = format!("{:.2}%", rpc_nodes_percent); - - let mut has_unknown = false; - let mut software_versions = software_versions - .iter() - .filter_map(|v| { - if v.is_none() { - has_unknown = true; - } - v.as_ref() - }) - .map(ToString::to_string) - .collect::>(); - if has_unknown { - software_versions.push("unknown".to_string()); - } - let software_versions = software_versions.join(", "); - max_software_versions_len = max_software_versions_len.max(software_versions.len()); - - max_feature_set_len = max_feature_set_len.max(feature_set.len()); - max_stake_percent_len = max_stake_percent_len.max(stake_percent.len()); - max_rpc_percent_len = max_rpc_percent_len.max(rpc_percent.len()); - - stats_output.push(( - software_versions, - feature_set, - stake_percent, - rpc_percent, - me, - )); - } - println!( - "{}", - style(format!( - "{1:<0$} {3:<2$} {5:<4$} {7:<6$}", - max_software_versions_len, - software_versions_title, - max_feature_set_len, - feature_set_title, - max_stake_percent_len, - stake_percent_title, - max_rpc_percent_len, - rpc_percent_title, - )) - .bold(), - ); - for (software_versions, feature_set, stake_percent, rpc_percent, me) in stats_output { - println!( - "{1:<0$} {3:>2$} {5:>4$} {7:>6$} {8}", - max_software_versions_len, - software_versions, - max_feature_set_len, - feature_set, - max_stake_percent_len, - stake_percent, - max_rpc_percent_len, - rpc_percent, - if me { "<-- me" } else { "" }, - ); - } - println!(); - } - - Ok(stake_allowed && rpc_allowed) + Ok((stake_allowed && rpc_allowed, cluster_feature_sets)) } fn status_from_account(account: Account) -> Option { @@ -550,10 +691,14 @@ fn process_status( }); } - let feature_activation_allowed = feature_activation_allowed(rpc_client, features.len() <= 1)?; + features.sort_unstable(); + + let (feature_activation_allowed, cluster_feature_sets) = + feature_activation_allowed(rpc_client, features.len() <= 
1)?; let feature_set = CliFeatures { features, feature_activation_allowed, + cluster_feature_sets, inactive, }; Ok(config.output_format.formatted_string(&feature_set)) @@ -577,7 +722,7 @@ fn process_activate( } } - if !feature_activation_allowed(rpc_client, false)? { + if !feature_activation_allowed(rpc_client, false)?.0 { match force { ForceActivation::Almost => return Err("Add force argument once more to override the sanity check to force feature activation ".into()), diff --git a/cli/src/spend_utils.rs b/cli/src/spend_utils.rs index b678e6e3502b46..a8c0230bd21b21 100644 --- a/cli/src/spend_utils.rs +++ b/cli/src/spend_utils.rs @@ -16,6 +16,7 @@ use { pub enum SpendAmount { All, Some(u64), + RentExempt, } impl Default for SpendAmount { @@ -90,6 +91,7 @@ where 0, from_pubkey, fee_pubkey, + 0, build_message, )?; Ok((message, spend)) @@ -97,6 +99,12 @@ where let from_balance = rpc_client .get_balance_with_commitment(from_pubkey, commitment)? .value; + let from_rent_exempt_minimum = if amount == SpendAmount::RentExempt { + let data = rpc_client.get_account_data(from_pubkey)?; + rpc_client.get_minimum_balance_for_rent_exemption(data.len())? 
+ } else { + 0 + }; let (message, SpendAndFee { spend, fee }) = resolve_spend_message( rpc_client, amount, @@ -104,6 +112,7 @@ where from_balance, from_pubkey, fee_pubkey, + from_rent_exempt_minimum, build_message, )?; if from_pubkey == fee_pubkey { @@ -140,6 +149,7 @@ fn resolve_spend_message( from_balance: u64, from_pubkey: &Pubkey, fee_pubkey: &Pubkey, + from_rent_exempt_minimum: u64, build_message: F, ) -> Result<(Message, SpendAndFee), CliError> where @@ -176,5 +186,20 @@ where }, )) } + SpendAmount::RentExempt => { + let mut lamports = if from_pubkey == fee_pubkey { + from_balance.saturating_sub(fee) + } else { + from_balance + }; + lamports = lamports.saturating_sub(from_rent_exempt_minimum); + Ok(( + build_message(lamports), + SpendAndFee { + spend: lamports, + fee, + }, + )) + } } } diff --git a/cli/src/vote.rs b/cli/src/vote.rs index d616bd9444752d..1e4607c0fe52a5 100644 --- a/cli/src/vote.rs +++ b/cli/src/vote.rs @@ -6,18 +6,28 @@ use { ProcessResult, }, memo::WithMemo, - spend_utils::{resolve_spend_tx_and_check_account_balance, SpendAmount}, + nonce::check_nonce_account, + spend_utils::{resolve_spend_tx_and_check_account_balances, SpendAmount}, stake::check_current_authority, }, clap::{value_t_or_exit, App, Arg, ArgMatches, SubCommand}, solana_clap_utils::{ + fee_payer::{fee_payer_arg, FEE_PAYER_ARG}, input_parsers::*, input_validators::*, keypair::{DefaultSigner, SignerIndex}, memo::{memo_arg, MEMO_ARG}, + nonce::*, + offline::*, + }, + solana_cli_output::{ + return_signers_with_config, CliEpochVotingHistory, CliLockout, CliVoteAccount, + ReturnSignersConfig, + }, + solana_client::{ + blockhash_query::BlockhashQuery, nonce_utils, rpc_client::RpcClient, + rpc_config::RpcGetVoteAccountsConfig, }, - solana_cli_output::{CliEpochVotingHistory, CliLockout, CliVoteAccount}, - solana_client::{rpc_client::RpcClient, rpc_config::RpcGetVoteAccountsConfig}, solana_remote_wallet::remote_wallet::RemoteWalletManager, solana_sdk::{ account::Account, 
commitment_config::CommitmentConfig, message::Message, @@ -96,6 +106,9 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .help("Seed for address generation; if specified, the resulting account will be at a derived address of the VOTE ACCOUNT pubkey") ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -123,6 +136,9 @@ impl VoteSubCommands for App<'_, '_> { .required(true), "New authorized vote signer. "), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -150,6 +166,9 @@ impl VoteSubCommands for App<'_, '_> { .required(true), "New authorized withdrawer. "), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -179,6 +198,9 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("New authorized vote signer."), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -208,6 +230,9 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("New authorized withdrawer."), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -238,6 +263,9 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer keypair"), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -268,6 +296,9 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer keypair"), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg()) ) .subcommand( @@ -328,7 +359,7 @@ impl VoteSubCommands for App<'_, '_> { .takes_value(true) .required(true) .validator(is_amount_or_all) - .help("The amount to withdraw, in SOL; accepts keyword ALL"), + .help("The amount to withdraw, in SOL; accepts keyword ALL, which for this command means account balance minus rent-exempt minimum"), ) .arg( 
Arg::with_name("authorized_withdrawer") @@ -338,6 +369,9 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer [default: cli config keypair]"), ) + .offline_args() + .nonce_args(false) + .arg(fee_payer_arg()) .arg(memo_arg() ) ) @@ -366,6 +400,7 @@ impl VoteSubCommands for App<'_, '_> { .validator(is_valid_signer) .help("Authorized withdrawer [default: cli config keypair]"), ) + .arg(fee_payer_arg()) .arg(memo_arg() ) ) @@ -386,7 +421,14 @@ pub fn parse_create_vote_account( let authorized_withdrawer = pubkey_of_signer(matches, "authorized_withdrawer", wallet_manager)?.unwrap(); let allow_unsafe = matches.is_present("allow_unsafe_authorized_withdrawer"); + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); + let nonce_account = pubkey_of_signer(matches, NONCE_ARG.name, wallet_manager)?; let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let (nonce_authority, nonce_authority_pubkey) = + signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; if !allow_unsafe { if authorized_withdrawer == vote_account_pubkey.unwrap() { @@ -405,12 +447,12 @@ pub fn parse_create_vote_account( } } - let payer_provided = None; - let signer_info = default_signer.generate_unique_signers( - vec![payer_provided, vote_account, identity_account], - matches, - wallet_manager, - )?; + let mut bulk_signers = vec![fee_payer, vote_account, identity_account]; + if nonce_account.is_some() { + bulk_signers.push(nonce_authority); + } + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; Ok(CliCommandInfo { command: CliCommand::CreateVoteAccount { @@ -420,7 +462,13 @@ pub fn parse_create_vote_account( authorized_voter, authorized_withdrawer, 
commission, + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, + fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, }) @@ -437,27 +485,43 @@ pub fn parse_vote_authorize( pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap(); let (authorized, authorized_pubkey) = signer_of(matches, "authorized", wallet_manager)?; - let payer_provided = None; - let mut signers = vec![payer_provided, authorized]; + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); + let nonce_account = pubkey_of(matches, NONCE_ARG.name); + let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let (nonce_authority, nonce_authority_pubkey) = + signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![fee_payer, authorized]; let new_authorized_pubkey = if checked { let (new_authorized_signer, new_authorized_pubkey) = signer_of(matches, "new_authorized", wallet_manager)?; - signers.push(new_authorized_signer); + bulk_signers.push(new_authorized_signer); new_authorized_pubkey.unwrap() } else { pubkey_of_signer(matches, "new_authorized_pubkey", wallet_manager)?.unwrap() }; - - let signer_info = default_signer.generate_unique_signers(signers, matches, wallet_manager)?; - let memo = matches.value_of(MEMO_ARG.name).map(String::from); + if nonce_account.is_some() { + bulk_signers.push(nonce_authority); + } + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; Ok(CliCommandInfo { command: CliCommand::VoteAuthorize { vote_account_pubkey, new_authorized_pubkey, vote_authorize, + sign_only, + 
dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, + fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), authorized: signer_info.index_of(authorized_pubkey).unwrap(), new_authorized: if checked { signer_info.index_of(Some(new_authorized_pubkey)) @@ -481,20 +545,34 @@ pub fn parse_vote_update_validator( let (authorized_withdrawer, authorized_withdrawer_pubkey) = signer_of(matches, "authorized_withdrawer", wallet_manager)?; - let payer_provided = None; - let signer_info = default_signer.generate_unique_signers( - vec![payer_provided, authorized_withdrawer, new_identity_account], - matches, - wallet_manager, - )?; + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); + let nonce_account = pubkey_of(matches, NONCE_ARG.name); let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let (nonce_authority, nonce_authority_pubkey) = + signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![fee_payer, authorized_withdrawer, new_identity_account]; + if nonce_account.is_some() { + bulk_signers.push(nonce_authority); + } + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; Ok(CliCommandInfo { command: CliCommand::VoteUpdateValidator { vote_account_pubkey, new_identity_account: signer_info.index_of(new_identity_pubkey).unwrap(), withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(), + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, + fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: 
signer_info.signers, }) @@ -511,20 +589,34 @@ pub fn parse_vote_update_commission( signer_of(matches, "authorized_withdrawer", wallet_manager)?; let commission = value_t_or_exit!(matches, "commission", u8); - let payer_provided = None; - let signer_info = default_signer.generate_unique_signers( - vec![payer_provided, authorized_withdrawer], - matches, - wallet_manager, - )?; + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); + let nonce_account = pubkey_of(matches, NONCE_ARG.name); let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let (nonce_authority, nonce_authority_pubkey) = + signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![fee_payer, authorized_withdrawer]; + if nonce_account.is_some() { + bulk_signers.push(nonce_authority); + } + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; Ok(CliCommandInfo { command: CliCommand::VoteUpdateCommission { vote_account_pubkey, commission, withdraw_authority: signer_info.index_of(authorized_withdrawer_pubkey).unwrap(), + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, + fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, }) @@ -561,18 +653,32 @@ pub fn parse_withdraw_from_vote_account( pubkey_of_signer(matches, "vote_account_pubkey", wallet_manager)?.unwrap(); let destination_account_pubkey = pubkey_of_signer(matches, "destination_account_pubkey", wallet_manager)?.unwrap(); - let withdraw_amount = SpendAmount::new_from_matches(matches, "amount"); + let mut withdraw_amount = SpendAmount::new_from_matches(matches, "amount"); 
+ // As a safeguard for vote accounts for running validators, `ALL` withdraws only the amount in + // excess of the rent-exempt minimum. In order to close the account with this subcommand, a + // validator must specify the withdrawal amount precisely. + if withdraw_amount == SpendAmount::All { + withdraw_amount = SpendAmount::RentExempt; + } let (withdraw_authority, withdraw_authority_pubkey) = signer_of(matches, "authorized_withdrawer", wallet_manager)?; - let payer_provided = None; - let signer_info = default_signer.generate_unique_signers( - vec![payer_provided, withdraw_authority], - matches, - wallet_manager, - )?; + let sign_only = matches.is_present(SIGN_ONLY_ARG.name); + let dump_transaction_message = matches.is_present(DUMP_TRANSACTION_MESSAGE.name); + let blockhash_query = BlockhashQuery::new_from_matches(matches); + let nonce_account = pubkey_of(matches, NONCE_ARG.name); let memo = matches.value_of(MEMO_ARG.name).map(String::from); + let (nonce_authority, nonce_authority_pubkey) = + signer_of(matches, NONCE_AUTHORITY_ARG.name, wallet_manager)?; + let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; + + let mut bulk_signers = vec![fee_payer, withdraw_authority]; + if nonce_account.is_some() { + bulk_signers.push(nonce_authority); + } + let signer_info = + default_signer.generate_unique_signers(bulk_signers, matches, wallet_manager)?; Ok(CliCommandInfo { command: CliCommand::WithdrawFromVoteAccount { @@ -580,7 +686,13 @@ pub fn parse_withdraw_from_vote_account( destination_account_pubkey, withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(), withdraw_amount, + sign_only, + dump_transaction_message, + blockhash_query, + nonce_account, + nonce_authority: signer_info.index_of(nonce_authority_pubkey).unwrap(), memo, + fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, }) @@ -598,10 +710,10 @@ pub fn parse_close_vote_account( let (withdraw_authority, 
withdraw_authority_pubkey) = signer_of(matches, "authorized_withdrawer", wallet_manager)?; + let (fee_payer, fee_payer_pubkey) = signer_of(matches, FEE_PAYER_ARG.name, wallet_manager)?; - let payer_provided = None; let signer_info = default_signer.generate_unique_signers( - vec![payer_provided, withdraw_authority], + vec![fee_payer, withdraw_authority], matches, wallet_manager, )?; @@ -613,11 +725,13 @@ pub fn parse_close_vote_account( destination_account_pubkey, withdraw_authority: signer_info.index_of(withdraw_authority_pubkey).unwrap(), memo, + fee_payer: signer_info.index_of(fee_payer_pubkey).unwrap(), }, signers: signer_info.signers, }) } +#[allow(clippy::too_many_arguments)] pub fn process_create_vote_account( rpc_client: &RpcClient, config: &CliConfig, @@ -627,7 +741,13 @@ pub fn process_create_vote_account( authorized_voter: &Option, authorized_withdrawer: Pubkey, commission: u8, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, + nonce_account: Option<&Pubkey>, + nonce_authority: SignerIndex, memo: Option<&String>, + fee_payer: SignerIndex, ) -> ProcessResult { let vote_account = config.signers[vote_account]; let vote_account_pubkey = vote_account.pubkey(); @@ -653,6 +773,9 @@ pub fn process_create_vote_account( .max(1); let amount = SpendAmount::Some(required_balance); + let fee_payer = config.signers[fee_payer]; + let nonce_authority = config.signers[nonce_authority]; + let build_message = |lamports| { let vote_init = VoteInit { node_pubkey: identity_pubkey, @@ -680,42 +803,76 @@ pub fn process_create_vote_account( ) .with_memo(memo) }; - Message::new(&ixs, Some(&config.signers[0].pubkey())) - }; - - if let Ok(response) = - rpc_client.get_account_with_commitment(&vote_account_address, config.commitment) - { - if let Some(vote_account) = response.value { - let err_msg = if vote_account.owner == solana_vote_program::id() { - format!("Vote account {} already exists", vote_account_address) - } else { - format!( - 
"Account {} already exists and is not a vote account", - vote_account_address - ) - }; - return Err(CliError::BadParameter(err_msg).into()); + if let Some(nonce_account) = &nonce_account { + Message::new_with_nonce( + ixs, + Some(&fee_payer.pubkey()), + nonce_account, + &nonce_authority.pubkey(), + ) + } else { + Message::new(&ixs, Some(&fee_payer.pubkey())) } - } + }; - let latest_blockhash = rpc_client.get_latest_blockhash()?; + let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; - let (message, _) = resolve_spend_tx_and_check_account_balance( + let (message, _) = resolve_spend_tx_and_check_account_balances( rpc_client, - false, + sign_only, amount, - &latest_blockhash, + &recent_blockhash, &config.signers[0].pubkey(), + &fee_payer.pubkey(), build_message, config.commitment, )?; + + if !sign_only { + if let Ok(response) = + rpc_client.get_account_with_commitment(&vote_account_address, config.commitment) + { + if let Some(vote_account) = response.value { + let err_msg = if vote_account.owner == solana_vote_program::id() { + format!("Vote account {} already exists", vote_account_address) + } else { + format!( + "Account {} already exists and is not a vote account", + vote_account_address + ) + }; + return Err(CliError::BadParameter(err_msg).into()); + } + } + + if let Some(nonce_account) = &nonce_account { + let nonce_account = nonce_utils::get_account_with_commitment( + rpc_client, + nonce_account, + config.commitment, + )?; + check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?; + } + } + let mut tx = Transaction::new_unsigned(message); - tx.try_sign(&config.signers, latest_blockhash)?; - let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, config) + if sign_only { + tx.try_partial_sign(&config.signers, recent_blockhash)?; + return_signers_with_config( + &tx, + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, + }, 
+ ) + } else { + tx.try_sign(&config.signers, recent_blockhash)?; + let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); + log_instruction_custom_error::(result, config) + } } +#[allow(clippy::too_many_arguments)] pub fn process_vote_authorize( rpc_client: &RpcClient, config: &CliConfig, @@ -724,30 +881,42 @@ pub fn process_vote_authorize( vote_authorize: VoteAuthorize, authorized: SignerIndex, new_authorized: Option, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option<&String>, + fee_payer: SignerIndex, ) -> ProcessResult { let authorized = config.signers[authorized]; let new_authorized_signer = new_authorized.map(|index| config.signers[index]); - let (_, vote_state) = get_vote_account(rpc_client, vote_account_pubkey, config.commitment)?; + let vote_state = if !sign_only { + Some(get_vote_account(rpc_client, vote_account_pubkey, config.commitment)?.1) + } else { + None + }; match vote_authorize { VoteAuthorize::Voter => { - let current_epoch = rpc_client.get_epoch_info()?.epoch; - let current_authorized_voter = vote_state - .authorized_voters() - .get_authorized_voter(current_epoch) - .ok_or_else(|| { - CliError::RpcRequestError( - "Invalid vote account state; no authorized voters found".to_string(), - ) - })?; - check_current_authority(¤t_authorized_voter, &authorized.pubkey())?; - if let Some(signer) = new_authorized_signer { - if signer.is_interactive() { - return Err(CliError::BadParameter(format!( - "invalid new authorized vote signer {:?}. 
Interactive vote signers not supported", - new_authorized_pubkey - )).into()); + if let Some(vote_state) = vote_state { + let current_epoch = rpc_client.get_epoch_info()?.epoch; + let current_authorized_voter = vote_state + .authorized_voters() + .get_authorized_voter(current_epoch) + .ok_or_else(|| { + CliError::RpcRequestError( + "Invalid vote account state; no authorized voters found".to_string(), + ) + })?; + check_current_authority(¤t_authorized_voter, &authorized.pubkey())?; + if let Some(signer) = new_authorized_signer { + if signer.is_interactive() { + return Err(CliError::BadParameter(format!( + "invalid new authorized vote signer {:?}. Interactive vote signers not supported", + new_authorized_pubkey + )).into()); + } } } } @@ -756,11 +925,12 @@ pub fn process_vote_authorize( (&authorized.pubkey(), "authorized_account".to_string()), (new_authorized_pubkey, "new_authorized_pubkey".to_string()), )?; - check_current_authority(&vote_state.authorized_withdrawer, &authorized.pubkey())? + if let Some(vote_state) = vote_state { + check_current_authority(&vote_state.authorized_withdrawer, &authorized.pubkey())? 
+ } } } - let latest_blockhash = rpc_client.get_latest_blockhash()?; let vote_ix = if new_authorized_signer.is_some() { vote_instruction::authorize_checked( vote_account_pubkey, // vote account to update @@ -778,26 +948,67 @@ pub fn process_vote_authorize( }; let ixs = vec![vote_ix].with_memo(memo); - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; + + let nonce_authority = config.signers[nonce_authority]; + let fee_payer = config.signers[fee_payer]; + + let message = if let Some(nonce_account) = &nonce_account { + Message::new_with_nonce( + ixs, + Some(&fee_payer.pubkey()), + nonce_account, + &nonce_authority.pubkey(), + ) + } else { + Message::new(&ixs, Some(&fee_payer.pubkey())) + }; let mut tx = Transaction::new_unsigned(message); - tx.try_sign(&config.signers, latest_blockhash)?; - check_account_for_fee_with_commitment( - rpc_client, - &config.signers[0].pubkey(), - &tx.message, - config.commitment, - )?; - let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, config) + + if sign_only { + tx.try_partial_sign(&config.signers, recent_blockhash)?; + return_signers_with_config( + &tx, + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, + }, + ) + } else { + tx.try_sign(&config.signers, recent_blockhash)?; + if let Some(nonce_account) = &nonce_account { + let nonce_account = nonce_utils::get_account_with_commitment( + rpc_client, + nonce_account, + config.commitment, + )?; + check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?; + } + check_account_for_fee_with_commitment( + rpc_client, + &config.signers[0].pubkey(), + &tx.message, + config.commitment, + )?; + let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); + log_instruction_custom_error::(result, config) + } } +#[allow(clippy::too_many_arguments)] pub fn 
process_vote_update_validator( rpc_client: &RpcClient, config: &CliConfig, vote_account_pubkey: &Pubkey, new_identity_account: SignerIndex, withdraw_authority: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option<&String>, + fee_payer: SignerIndex, ) -> ProcessResult { let authorized_withdrawer = config.signers[withdraw_authority]; let new_identity_account = config.signers[new_identity_account]; @@ -806,55 +1017,123 @@ pub fn process_vote_update_validator( (vote_account_pubkey, "vote_account_pubkey".to_string()), (&new_identity_pubkey, "new_identity_account".to_string()), )?; - let latest_blockhash = rpc_client.get_latest_blockhash()?; + let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; let ixs = vec![vote_instruction::update_validator_identity( vote_account_pubkey, &authorized_withdrawer.pubkey(), &new_identity_pubkey, )] .with_memo(memo); - - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let nonce_authority = config.signers[nonce_authority]; + let fee_payer = config.signers[fee_payer]; + + let message = if let Some(nonce_account) = &nonce_account { + Message::new_with_nonce( + ixs, + Some(&fee_payer.pubkey()), + nonce_account, + &nonce_authority.pubkey(), + ) + } else { + Message::new(&ixs, Some(&fee_payer.pubkey())) + }; let mut tx = Transaction::new_unsigned(message); - tx.try_sign(&config.signers, latest_blockhash)?; - check_account_for_fee_with_commitment( - rpc_client, - &config.signers[0].pubkey(), - &tx.message, - config.commitment, - )?; - let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, config) + + if sign_only { + tx.try_partial_sign(&config.signers, recent_blockhash)?; + return_signers_with_config( + &tx, + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, + }, + ) + } else { + 
tx.try_sign(&config.signers, recent_blockhash)?; + if let Some(nonce_account) = &nonce_account { + let nonce_account = nonce_utils::get_account_with_commitment( + rpc_client, + nonce_account, + config.commitment, + )?; + check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?; + } + check_account_for_fee_with_commitment( + rpc_client, + &config.signers[0].pubkey(), + &tx.message, + config.commitment, + )?; + let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); + log_instruction_custom_error::(result, config) + } } +#[allow(clippy::too_many_arguments)] pub fn process_vote_update_commission( rpc_client: &RpcClient, config: &CliConfig, vote_account_pubkey: &Pubkey, commission: u8, withdraw_authority: SignerIndex, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, + nonce_account: Option, + nonce_authority: SignerIndex, memo: Option<&String>, + fee_payer: SignerIndex, ) -> ProcessResult { let authorized_withdrawer = config.signers[withdraw_authority]; - let latest_blockhash = rpc_client.get_latest_blockhash()?; + let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; let ixs = vec![vote_instruction::update_commission( vote_account_pubkey, &authorized_withdrawer.pubkey(), commission, )] .with_memo(memo); - - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); + let nonce_authority = config.signers[nonce_authority]; + let fee_payer = config.signers[fee_payer]; + + let message = if let Some(nonce_account) = &nonce_account { + Message::new_with_nonce( + ixs, + Some(&fee_payer.pubkey()), + nonce_account, + &nonce_authority.pubkey(), + ) + } else { + Message::new(&ixs, Some(&fee_payer.pubkey())) + }; let mut tx = Transaction::new_unsigned(message); - tx.try_sign(&config.signers, latest_blockhash)?; - check_account_for_fee_with_commitment( - rpc_client, - &config.signers[0].pubkey(), - &tx.message, - config.commitment, - )?; - let result = 
rpc_client.send_and_confirm_transaction_with_spinner(&tx); - log_instruction_custom_error::(result, config) + if sign_only { + tx.try_partial_sign(&config.signers, recent_blockhash)?; + return_signers_with_config( + &tx, + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, + }, + ) + } else { + tx.try_sign(&config.signers, recent_blockhash)?; + if let Some(nonce_account) = &nonce_account { + let nonce_account = nonce_utils::get_account_with_commitment( + rpc_client, + nonce_account, + config.commitment, + )?; + check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?; + } + check_account_for_fee_with_commitment( + rpc_client, + &config.signers[0].pubkey(), + &tx.message, + config.commitment, + )?; + let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); + log_instruction_custom_error::(result, config) + } } fn get_vote_account( @@ -945,6 +1224,7 @@ pub fn process_show_vote_account( Ok(config.output_format.formatted_string(&vote_account_data)) } +#[allow(clippy::too_many_arguments)] pub fn process_withdraw_from_vote_account( rpc_client: &RpcClient, config: &CliConfig, @@ -952,46 +1232,97 @@ pub fn process_withdraw_from_vote_account( withdraw_authority: SignerIndex, withdraw_amount: SpendAmount, destination_account_pubkey: &Pubkey, + sign_only: bool, + dump_transaction_message: bool, + blockhash_query: &BlockhashQuery, + nonce_account: Option<&Pubkey>, + nonce_authority: SignerIndex, memo: Option<&String>, + fee_payer: SignerIndex, ) -> ProcessResult { - let latest_blockhash = rpc_client.get_latest_blockhash()?; let withdraw_authority = config.signers[withdraw_authority]; + let recent_blockhash = blockhash_query.get_blockhash(rpc_client, config.commitment)?; - let current_balance = rpc_client.get_balance(vote_account_pubkey)?; - let minimum_balance = rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?; + let fee_payer = config.signers[fee_payer]; + let nonce_authority = 
config.signers[nonce_authority]; - let lamports = match withdraw_amount { - SpendAmount::All => current_balance.saturating_sub(minimum_balance), - SpendAmount::Some(withdraw_amount) => { - if current_balance.saturating_sub(withdraw_amount) < minimum_balance { + let build_message = |lamports| { + let ixs = vec![withdraw( + vote_account_pubkey, + &withdraw_authority.pubkey(), + lamports, + destination_account_pubkey, + )] + .with_memo(memo); + + if let Some(nonce_account) = &nonce_account { + Message::new_with_nonce( + ixs, + Some(&fee_payer.pubkey()), + nonce_account, + &nonce_authority.pubkey(), + ) + } else { + Message::new(&ixs, Some(&fee_payer.pubkey())) + } + }; + + let (message, _) = resolve_spend_tx_and_check_account_balances( + rpc_client, + sign_only, + withdraw_amount, + &recent_blockhash, + vote_account_pubkey, + &fee_payer.pubkey(), + build_message, + config.commitment, + )?; + + if !sign_only { + let current_balance = rpc_client.get_balance(vote_account_pubkey)?; + let minimum_balance = + rpc_client.get_minimum_balance_for_rent_exemption(VoteState::size_of())?; + if let SpendAmount::Some(withdraw_amount) = withdraw_amount { + let balance_remaining = current_balance.saturating_sub(withdraw_amount); + if balance_remaining < minimum_balance && balance_remaining != 0 { return Err(CliError::BadParameter(format!( "Withdraw amount too large. 
The vote account balance must be at least {} SOL to remain rent exempt", lamports_to_sol(minimum_balance) )) .into()); } - withdraw_amount } - }; + } - let ixs = vec![withdraw( - vote_account_pubkey, - &withdraw_authority.pubkey(), - lamports, - destination_account_pubkey, - )] - .with_memo(memo); + let mut tx = Transaction::new_unsigned(message); - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); - let mut transaction = Transaction::new_unsigned(message); - transaction.try_sign(&config.signers, latest_blockhash)?; - check_account_for_fee_with_commitment( - rpc_client, - &config.signers[0].pubkey(), - &transaction.message, - config.commitment, - )?; - let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction); - log_instruction_custom_error::(result, config) + if sign_only { + tx.try_partial_sign(&config.signers, recent_blockhash)?; + return_signers_with_config( + &tx, + &config.output_format, + &ReturnSignersConfig { + dump_transaction_message, + }, + ) + } else { + tx.try_sign(&config.signers, recent_blockhash)?; + if let Some(nonce_account) = &nonce_account { + let nonce_account = nonce_utils::get_account_with_commitment( + rpc_client, + nonce_account, + config.commitment, + )?; + check_nonce_account(&nonce_account, &nonce_authority.pubkey(), &recent_blockhash)?; + } + check_account_for_fee_with_commitment( + rpc_client, + &tx.message.account_keys[0], + &tx.message, + config.commitment, + )?; + let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); + log_instruction_custom_error::(result, config) + } } pub fn process_close_vote_account( @@ -1001,6 +1332,7 @@ pub fn process_close_vote_account( withdraw_authority: SignerIndex, destination_account_pubkey: &Pubkey, memo: Option<&String>, + fee_payer: SignerIndex, ) -> ProcessResult { let vote_account_status = rpc_client.get_vote_accounts_with_config(RpcGetVoteAccountsConfig { @@ -1025,6 +1357,7 @@ pub fn process_close_vote_account( let latest_blockhash = 
rpc_client.get_latest_blockhash()?; let withdraw_authority = config.signers[withdraw_authority]; + let fee_payer = config.signers[fee_payer]; let current_balance = rpc_client.get_balance(vote_account_pubkey)?; @@ -1036,16 +1369,16 @@ pub fn process_close_vote_account( )] .with_memo(memo); - let message = Message::new(&ixs, Some(&config.signers[0].pubkey())); - let mut transaction = Transaction::new_unsigned(message); - transaction.try_sign(&config.signers, latest_blockhash)?; + let message = Message::new(&ixs, Some(&fee_payer.pubkey())); + let mut tx = Transaction::new_unsigned(message); + tx.try_sign(&config.signers, latest_blockhash)?; check_account_for_fee_with_commitment( rpc_client, - &config.signers[0].pubkey(), - &transaction.message, + &tx.message.account_keys[0], + &tx.message, config.commitment, )?; - let result = rpc_client.send_and_confirm_transaction_with_spinner(&transaction); + let result = rpc_client.send_and_confirm_transaction_with_spinner(&tx); log_instruction_custom_error::(result, config) } @@ -1054,7 +1387,12 @@ mod tests { use { super::*, crate::{clap_app::get_clap_app, cli::parse_command}, - solana_sdk::signature::{read_keypair_file, write_keypair, Keypair, Signer}, + solana_client::blockhash_query, + solana_sdk::{ + hash::Hash, + signature::{read_keypair_file, write_keypair, Keypair, Signer}, + signer::presigner::Presigner, + }, tempfile::NamedTempFile, }; @@ -1072,12 +1410,19 @@ mod tests { let keypair2 = Keypair::new(); let pubkey2 = keypair2.pubkey(); let pubkey2_string = pubkey2.to_string(); + let sig2 = keypair2.sign_message(&[0u8]); + let signer2 = format!("{}={}", keypair2.pubkey(), sig2); let default_keypair = Keypair::new(); let (default_keypair_file, mut tmp_file) = make_tmp_file(); write_keypair(&default_keypair, tmp_file.as_file_mut()).unwrap(); let default_signer = DefaultSigner::new("", &default_keypair_file); + let blockhash = Hash::default(); + let blockhash_string = format!("{}", blockhash); + let nonce_account = 
Pubkey::new_unique(); + + // Test VoteAuthorize SubCommand let test_authorize_voter = test_commands.clone().get_matches_from(vec![ "test", "vote-authorize-voter", @@ -1092,7 +1437,13 @@ mod tests { vote_account_pubkey: pubkey, new_authorized_pubkey: pubkey2, vote_authorize: VoteAuthorize::Voter, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 0, new_authorized: None, }, @@ -1118,7 +1469,13 @@ mod tests { vote_account_pubkey: pubkey, new_authorized_pubkey: pubkey2, vote_authorize: VoteAuthorize::Voter, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 1, new_authorized: None, }, @@ -1129,6 +1486,89 @@ mod tests { } ); + let test_authorize_voter = test_commands.clone().get_matches_from(vec![ + "test", + "vote-authorize-voter", + &pubkey_string, + &authorized_keypair_file, + &pubkey2_string, + "--blockhash", + &blockhash_string, + "--sign-only", + ]); + assert_eq!( + parse_command(&test_authorize_voter, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::VoteAuthorize { + vote_account_pubkey: pubkey, + new_authorized_pubkey: pubkey2, + vote_authorize: VoteAuthorize::Voter, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + authorized: 1, + new_authorized: None, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&authorized_keypair_file).unwrap().into(), + ], + } + ); + + let authorized_sig = authorized_keypair.sign_message(&[0u8]); + let authorized_signer = format!("{}={}", authorized_keypair.pubkey(), authorized_sig); + let 
test_authorize_voter = test_commands.clone().get_matches_from(vec![ + "test", + "vote-authorize-voter", + &pubkey_string, + &authorized_keypair.pubkey().to_string(), + &pubkey2_string, + "--blockhash", + &blockhash_string, + "--signer", + &authorized_signer, + "--signer", + &signer2, + "--fee-payer", + &pubkey2_string, + "--nonce", + &nonce_account.to_string(), + "--nonce-authority", + &pubkey2_string, + ]); + assert_eq!( + parse_command(&test_authorize_voter, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::VoteAuthorize { + vote_account_pubkey: pubkey, + new_authorized_pubkey: pubkey2, + vote_authorize: VoteAuthorize::Voter, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator( + blockhash_query::Source::NonceAccount(nonce_account), + blockhash + ), + nonce_account: Some(nonce_account), + nonce_authority: 0, + memo: None, + fee_payer: 0, + authorized: 1, + new_authorized: None, + }, + signers: vec![ + Presigner::new(&pubkey2, &sig2).into(), + Presigner::new(&authorized_keypair.pubkey(), &authorized_sig).into(), + ], + } + ); + + // Test checked VoteAuthorize SubCommand let (voter_keypair_file, mut tmp_file) = make_tmp_file(); let voter_keypair = Keypair::new(); write_keypair(&voter_keypair, tmp_file.as_file_mut()).unwrap(); @@ -1147,7 +1587,13 @@ mod tests { vote_account_pubkey: pubkey, new_authorized_pubkey: voter_keypair.pubkey(), vote_authorize: VoteAuthorize::Voter, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 0, new_authorized: Some(1), }, @@ -1172,7 +1618,13 @@ mod tests { vote_account_pubkey: pubkey, new_authorized_pubkey: voter_keypair.pubkey(), vote_authorize: VoteAuthorize::Voter, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), 
+ nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 1, new_authorized: Some(2), }, @@ -1193,14 +1645,15 @@ mod tests { ]); assert!(parse_command(&test_authorize_voter, &default_signer, &mut None).is_err()); - let (keypair_file, mut tmp_file) = make_tmp_file(); - let keypair = Keypair::new(); - write_keypair(&keypair, tmp_file.as_file_mut()).unwrap(); // Test CreateVoteAccount SubCommand let (identity_keypair_file, mut tmp_file) = make_tmp_file(); let identity_keypair = Keypair::new(); let authorized_withdrawer = Keypair::new().pubkey(); write_keypair(&identity_keypair, tmp_file.as_file_mut()).unwrap(); + let (keypair_file, mut tmp_file) = make_tmp_file(); + let keypair = Keypair::new(); + write_keypair(&keypair, tmp_file.as_file_mut()).unwrap(); + let test_create_vote_account = test_commands.clone().get_matches_from(vec![ "test", "create-vote-account", @@ -1220,20 +1673,22 @@ mod tests { authorized_voter: None, authorized_withdrawer, commission: 10, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), - Box::new(keypair), + read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&identity_keypair_file).unwrap().into(), ], } ); - let (keypair_file, mut tmp_file) = make_tmp_file(); - let keypair = Keypair::new(); - write_keypair(&keypair, tmp_file.as_file_mut()).unwrap(); - let test_create_vote_account2 = test_commands.clone().get_matches_from(vec![ "test", "create-vote-account", @@ -1251,16 +1706,115 @@ mod tests { authorized_voter: None, authorized_withdrawer, commission: 100, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![ 
read_keypair_file(&default_keypair_file).unwrap().into(), - Box::new(keypair), + read_keypair_file(&keypair_file).unwrap().into(), + read_keypair_file(&identity_keypair_file).unwrap().into(), + ], + } + ); + + let test_create_vote_account = test_commands.clone().get_matches_from(vec![ + "test", + "create-vote-account", + &keypair_file, + &identity_keypair_file, + &authorized_withdrawer.to_string(), + "--commission", + "10", + "--blockhash", + &blockhash_string, + "--sign-only", + "--fee-payer", + &default_keypair.pubkey().to_string(), + ]); + assert_eq!( + parse_command(&test_create_vote_account, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::CreateVoteAccount { + vote_account: 1, + seed: None, + identity_account: 2, + authorized_voter: None, + authorized_withdrawer, + commission: 10, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&identity_keypair_file).unwrap().into(), ], } ); + let identity_sig = identity_keypair.sign_message(&[0u8]); + let identity_signer = format!("{}={}", identity_keypair.pubkey(), identity_sig); + let test_create_vote_account = test_commands.clone().get_matches_from(vec![ + "test", + "create-vote-account", + &keypair_file, + &identity_keypair.pubkey().to_string(), + &authorized_withdrawer.to_string(), + "--commission", + "10", + "--blockhash", + &blockhash_string, + "--signer", + &identity_signer, + "--signer", + &signer2, + "--fee-payer", + &default_keypair_file, + "--nonce", + &nonce_account.to_string(), + "--nonce-authority", + &pubkey2_string, + ]); + assert_eq!( + parse_command(&test_create_vote_account, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::CreateVoteAccount { + vote_account: 1, + 
seed: None, + identity_account: 2, + authorized_voter: None, + authorized_withdrawer, + commission: 10, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator( + blockhash_query::Source::NonceAccount(nonce_account), + blockhash + ), + nonce_account: Some(nonce_account), + nonce_authority: 3, + memo: None, + fee_payer: 0, + }, + signers: vec![ + read_keypair_file(&default_keypair_file).unwrap().into(), + read_keypair_file(&keypair_file).unwrap().into(), + Presigner::new(&identity_keypair.pubkey(), &identity_sig).into(), + Presigner::new(&pubkey2, &sig2).into(), + ], + } + ); + // test init with an authed voter let authed = solana_sdk::pubkey::new_rand(); let (keypair_file, mut tmp_file) = make_tmp_file(); @@ -1286,7 +1840,13 @@ mod tests { authorized_voter: Some(authed), authorized_withdrawer, commission: 100, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -1318,11 +1878,17 @@ mod tests { authorized_voter: None, authorized_withdrawer: identity_keypair.pubkey(), commission: 100, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), - Box::new(keypair), + read_keypair_file(&keypair_file).unwrap().into(), read_keypair_file(&identity_keypair_file).unwrap().into(), ], } @@ -1342,7 +1908,13 @@ mod tests { vote_account_pubkey: pubkey, new_identity_account: 2, withdraw_authority: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + 
fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -1366,7 +1938,13 @@ mod tests { vote_account_pubkey: pubkey, commission: 42, withdraw_authority: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -1391,7 +1969,13 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 0, withdraw_amount: SpendAmount::Some(42_000_000_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -1412,8 +1996,14 @@ mod tests { vote_account_pubkey: read_keypair_file(&keypair_file).unwrap().pubkey(), destination_account_pubkey: pubkey, withdraw_authority: 0, - withdraw_amount: SpendAmount::All, + withdraw_amount: SpendAmount::RentExempt, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -1440,7 +2030,13 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 1, withdraw_amount: SpendAmount::Some(42_000_000_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), @@ -1449,6 +2045,81 @@ mod tests { } ); + // Test WithdrawFromVoteAccount subcommand with offline authority + let test_withdraw_from_vote_account = 
test_commands.clone().get_matches_from(vec![ + "test", + "withdraw-from-vote-account", + &keypair.pubkey().to_string(), + &pubkey_string, + "42", + "--authorized-withdrawer", + &withdraw_authority_file, + "--blockhash", + &blockhash_string, + "--sign-only", + "--fee-payer", + &withdraw_authority_file, + ]); + assert_eq!( + parse_command(&test_withdraw_from_vote_account, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::WithdrawFromVoteAccount { + vote_account_pubkey: keypair.pubkey(), + destination_account_pubkey: pubkey, + withdraw_authority: 0, + withdraw_amount: SpendAmount::Some(42_000_000_000), + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }, + signers: vec![read_keypair_file(&withdraw_authority_file).unwrap().into()], + } + ); + + let authorized_sig = withdraw_authority.sign_message(&[0u8]); + let authorized_signer = format!("{}={}", withdraw_authority.pubkey(), authorized_sig); + let test_withdraw_from_vote_account = test_commands.clone().get_matches_from(vec![ + "test", + "withdraw-from-vote-account", + &keypair.pubkey().to_string(), + &pubkey_string, + "42", + "--authorized-withdrawer", + &withdraw_authority.pubkey().to_string(), + "--blockhash", + &blockhash_string, + "--signer", + &authorized_signer, + "--fee-payer", + &withdraw_authority.pubkey().to_string(), + ]); + assert_eq!( + parse_command(&test_withdraw_from_vote_account, &default_signer, &mut None).unwrap(), + CliCommandInfo { + command: CliCommand::WithdrawFromVoteAccount { + vote_account_pubkey: keypair.pubkey(), + destination_account_pubkey: pubkey, + withdraw_authority: 0, + withdraw_amount: SpendAmount::Some(42_000_000_000), + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator( + blockhash_query::Source::Cluster, + blockhash + ), + nonce_account: None, + nonce_authority: 0, + 
memo: None, + fee_payer: 0, + }, + signers: vec![Presigner::new(&withdraw_authority.pubkey(), &authorized_sig).into(),], + } + ); + // Test CloseVoteAccount subcommand let test_close_vote_account = test_commands.clone().get_matches_from(vec![ "test", @@ -1464,6 +2135,7 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 0, memo: None, + fee_payer: 0, }, signers: vec![read_keypair_file(&default_keypair_file).unwrap().into()], } @@ -1489,6 +2161,7 @@ mod tests { destination_account_pubkey: pubkey, withdraw_authority: 1, memo: None, + fee_payer: 0, }, signers: vec![ read_keypair_file(&default_keypair_file).unwrap().into(), diff --git a/cli/src/wallet.rs b/cli/src/wallet.rs index 0abcffccbc1991..8f53a0cd755f3e 100644 --- a/cli/src/wallet.rs +++ b/cli/src/wallet.rs @@ -462,18 +462,27 @@ pub fn process_show_account( let mut account_string = config.output_format.formatted_string(&cli_account); - if config.output_format == OutputFormat::Display - || config.output_format == OutputFormat::DisplayVerbose - { - if let Some(output_file) = output_file { - let mut f = File::create(output_file)?; - f.write_all(&data)?; - writeln!(&mut account_string)?; - writeln!(&mut account_string, "Wrote account data to {}", output_file)?; - } else if !data.is_empty() { - use pretty_hex::*; - writeln!(&mut account_string, "{:?}", data.hex_dump())?; + match config.output_format { + OutputFormat::Json | OutputFormat::JsonCompact => { + if let Some(output_file) = output_file { + let mut f = File::create(output_file)?; + f.write_all(account_string.as_bytes())?; + writeln!(&mut account_string)?; + writeln!(&mut account_string, "Wrote account to {}", output_file)?; + } } + OutputFormat::Display | OutputFormat::DisplayVerbose => { + if let Some(output_file) = output_file { + let mut f = File::create(output_file)?; + f.write_all(&data)?; + writeln!(&mut account_string)?; + writeln!(&mut account_string, "Wrote account data to {}", output_file)?; + } else if !data.is_empty() { + use 
pretty_hex::*; + writeln!(&mut account_string, "{:?}", data.hex_dump())?; + } + } + OutputFormat::DisplayQuiet => (), } Ok(account_string) diff --git a/cli/tests/stake.rs b/cli/tests/stake.rs index 2b77535b121672..da9239d3d28ccb 100644 --- a/cli/tests/stake.rs +++ b/cli/tests/stake.rs @@ -59,7 +59,13 @@ fn test_stake_delegation_force() { authorized_voter: None, authorized_withdrawer, commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; process_command(&config).unwrap(); diff --git a/cli/tests/vote.rs b/cli/tests/vote.rs index ad3dd0714e2c10..ab52ec02475fb8 100644 --- a/cli/tests/vote.rs +++ b/cli/tests/vote.rs @@ -4,6 +4,7 @@ use { spend_utils::SpendAmount, test_utils::check_recent_balance, }, + solana_cli_output::{parse_sign_only_reply_string, OutputFormat}, solana_client::{ blockhash_query::{self, BlockhashQuery}, rpc_client::RpcClient, @@ -12,7 +13,7 @@ use { solana_sdk::{ account_utils::StateMut, commitment_config::CommitmentConfig, - signature::{Keypair, Signer}, + signature::{Keypair, NullSigner, Signer}, }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, @@ -49,7 +50,13 @@ fn test_vote_authorize_and_withdraw() { authorized_voter: None, authorized_withdrawer: config.signers[0].pubkey(), commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; process_command(&config).unwrap(); let vote_account = rpc_client @@ -93,7 +100,13 @@ fn test_vote_authorize_and_withdraw() { vote_account_pubkey, new_authorized_pubkey: first_withdraw_authority.pubkey(), vote_authorize: VoteAuthorize::Withdrawer, + sign_only: false, + dump_transaction_message: false, + blockhash_query: 
BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 0, new_authorized: None, }; @@ -112,7 +125,13 @@ fn test_vote_authorize_and_withdraw() { vote_account_pubkey, new_authorized_pubkey: withdraw_authority.pubkey(), vote_authorize: VoteAuthorize::Withdrawer, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 1, new_authorized: Some(1), }; @@ -126,7 +145,13 @@ fn test_vote_authorize_and_withdraw() { vote_account_pubkey, new_authorized_pubkey: withdraw_authority.pubkey(), vote_authorize: VoteAuthorize::Withdrawer, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, authorized: 1, new_authorized: Some(2), }; @@ -146,7 +171,13 @@ fn test_vote_authorize_and_withdraw() { withdraw_authority: 1, withdraw_amount: SpendAmount::Some(100), destination_account_pubkey: destination_account, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; process_command(&config).unwrap(); let expected_balance = expected_balance - 100; @@ -160,7 +191,13 @@ fn test_vote_authorize_and_withdraw() { vote_account_pubkey, new_identity_account: 2, withdraw_authority: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, memo: None, + fee_payer: 0, }; process_command(&config).unwrap(); @@ -172,8 +209,283 @@ fn test_vote_authorize_and_withdraw() { withdraw_authority: 1, destination_account_pubkey: destination_account, memo: None, + 
fee_payer: 0, }; process_command(&config).unwrap(); check_recent_balance(0, &rpc_client, &vote_account_pubkey); check_recent_balance(expected_balance, &rpc_client, &destination_account); } + +#[test] +fn test_offline_vote_authorize_and_withdraw() { + let mint_keypair = Keypair::new(); + let mint_pubkey = mint_keypair.pubkey(); + let faucet_addr = run_local_faucet(mint_keypair, None); + let test_validator = + TestValidator::with_no_fees(mint_pubkey, Some(faucet_addr), SocketAddrSpace::Unspecified); + + let rpc_client = + RpcClient::new_with_commitment(test_validator.rpc_url(), CommitmentConfig::processed()); + let default_signer = Keypair::new(); + + let mut config_payer = CliConfig::recent_for_tests(); + config_payer.json_rpc_url = test_validator.rpc_url(); + config_payer.signers = vec![&default_signer]; + + let mut config_offline = CliConfig::recent_for_tests(); + config_offline.json_rpc_url = String::default(); + config_offline.command = CliCommand::ClusterVersion; + let offline_keypair = Keypair::new(); + config_offline.signers = vec![&offline_keypair]; + // Verify that we cannot reach the cluster + process_command(&config_offline).unwrap_err(); + + request_and_confirm_airdrop( + &rpc_client, + &config_payer, + &config_payer.signers[0].pubkey(), + 100_000, + ) + .unwrap(); + check_recent_balance(100_000, &rpc_client, &config_payer.signers[0].pubkey()); + + request_and_confirm_airdrop( + &rpc_client, + &config_offline, + &config_offline.signers[0].pubkey(), + 100_000, + ) + .unwrap(); + check_recent_balance(100_000, &rpc_client, &config_offline.signers[0].pubkey()); + + // Create vote account with specific withdrawer + let vote_account_keypair = Keypair::new(); + let vote_account_pubkey = vote_account_keypair.pubkey(); + config_payer.signers = vec![&default_signer, &vote_account_keypair]; + config_payer.command = CliCommand::CreateVoteAccount { + vote_account: 1, + seed: None, + identity_account: 0, + authorized_voter: None, + authorized_withdrawer: 
offline_keypair.pubkey(), + commission: 0, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config_payer).unwrap(); + let vote_account = rpc_client + .get_account(&vote_account_keypair.pubkey()) + .unwrap(); + let vote_state: VoteStateVersions = vote_account.state().unwrap(); + let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + assert_eq!(authorized_withdrawer, offline_keypair.pubkey()); + let expected_balance = rpc_client + .get_minimum_balance_for_rent_exemption(VoteState::size_of()) + .unwrap() + .max(1); + check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey); + + // Transfer in some more SOL + config_payer.signers = vec![&default_signer]; + config_payer.command = CliCommand::Transfer { + amount: SpendAmount::Some(1_000), + to: vote_account_pubkey, + from: 0, + sign_only: false, + dump_transaction_message: false, + allow_unfunded_recipient: true, + no_wait: false, + blockhash_query: BlockhashQuery::All(blockhash_query::Source::Cluster), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + derived_address_seed: None, + derived_address_program_id: None, + }; + process_command(&config_payer).unwrap(); + let expected_balance = expected_balance + 1_000; + check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey); + + // Authorize vote account withdrawal to another signer, offline + let withdraw_authority = Keypair::new(); + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + config_offline.command = CliCommand::VoteAuthorize { + vote_account_pubkey, + new_authorized_pubkey: withdraw_authority.pubkey(), + vote_authorize: VoteAuthorize::Withdrawer, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + 
nonce_authority: 0, + memo: None, + fee_payer: 0, + authorized: 0, + new_authorized: None, + }; + config_offline.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config_offline).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + assert!(sign_only.has_all_signers()); + let offline_presigner = sign_only + .presigner_of(&config_offline.signers[0].pubkey()) + .unwrap(); + config_payer.signers = vec![&offline_presigner]; + config_payer.command = CliCommand::VoteAuthorize { + vote_account_pubkey, + new_authorized_pubkey: withdraw_authority.pubkey(), + vote_authorize: VoteAuthorize::Withdrawer, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + authorized: 0, + new_authorized: None, + }; + process_command(&config_payer).unwrap(); + let vote_account = rpc_client + .get_account(&vote_account_keypair.pubkey()) + .unwrap(); + let vote_state: VoteStateVersions = vote_account.state().unwrap(); + let authorized_withdrawer = vote_state.convert_to_current().authorized_withdrawer; + assert_eq!(authorized_withdrawer, withdraw_authority.pubkey()); + + // Withdraw from vote account offline + let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + let fee_payer_null_signer = NullSigner::new(&default_signer.pubkey()); + config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority]; + config_offline.command = CliCommand::WithdrawFromVoteAccount { + vote_account_pubkey, + withdraw_authority: 1, + withdraw_amount: SpendAmount::Some(100), + destination_account_pubkey: destination_account, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + 
nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + config_offline.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config_offline).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let offline_presigner = sign_only + .presigner_of(&config_offline.signers[1].pubkey()) + .unwrap(); + config_payer.signers = vec![&default_signer, &offline_presigner]; + config_payer.command = CliCommand::WithdrawFromVoteAccount { + vote_account_pubkey, + withdraw_authority: 1, + withdraw_amount: SpendAmount::Some(100), + destination_account_pubkey: destination_account, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config_payer).unwrap(); + let expected_balance = expected_balance - 100; + check_recent_balance(expected_balance, &rpc_client, &vote_account_pubkey); + check_recent_balance(100, &rpc_client, &destination_account); + + // Re-assign validator identity offline + let blockhash = rpc_client.get_latest_blockhash().unwrap(); + let new_identity_keypair = Keypair::new(); + let new_identity_null_signer = NullSigner::new(&new_identity_keypair.pubkey()); + config_offline.signers = vec![ + &fee_payer_null_signer, + &withdraw_authority, + &new_identity_null_signer, + ]; + config_offline.command = CliCommand::VoteUpdateValidator { + vote_account_pubkey, + new_identity_account: 2, + withdraw_authority: 1, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config_offline).unwrap(); + config_offline.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config_offline).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let 
offline_presigner = sign_only + .presigner_of(&config_offline.signers[1].pubkey()) + .unwrap(); + config_payer.signers = vec![&default_signer, &offline_presigner, &new_identity_keypair]; + config_payer.command = CliCommand::VoteUpdateValidator { + vote_account_pubkey, + new_identity_account: 2, + withdraw_authority: 1, + sign_only: false, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config_payer).unwrap(); + + // Close vote account offline. Must use WithdrawFromVoteAccount and specify amount, since + // CloseVoteAccount requires RpcClient + let destination_account = solana_sdk::pubkey::new_rand(); // Send withdrawal to new account to make balance check easy + config_offline.signers = vec![&fee_payer_null_signer, &withdraw_authority]; + config_offline.command = CliCommand::WithdrawFromVoteAccount { + vote_account_pubkey, + withdraw_authority: 1, + withdraw_amount: SpendAmount::Some(expected_balance), + destination_account_pubkey: destination_account, + sign_only: true, + dump_transaction_message: false, + blockhash_query: BlockhashQuery::None(blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + process_command(&config_offline).unwrap(); + config_offline.output_format = OutputFormat::JsonCompact; + let sig_response = process_command(&config_offline).unwrap(); + let sign_only = parse_sign_only_reply_string(&sig_response); + let offline_presigner = sign_only + .presigner_of(&config_offline.signers[1].pubkey()) + .unwrap(); + config_payer.signers = vec![&default_signer, &offline_presigner]; + config_payer.command = CliCommand::WithdrawFromVoteAccount { + vote_account_pubkey, + withdraw_authority: 1, + withdraw_amount: SpendAmount::Some(expected_balance), + destination_account_pubkey: destination_account, + sign_only: false, + 
dump_transaction_message: false, + blockhash_query: BlockhashQuery::FeeCalculator(blockhash_query::Source::Cluster, blockhash), + nonce_account: None, + nonce_authority: 0, + memo: None, + fee_payer: 0, + }; + let result = process_command(&config_payer).unwrap(); + println!("{:?}", result); + check_recent_balance(0, &rpc_client, &vote_account_pubkey); + println!("what"); + check_recent_balance(expected_balance, &rpc_client, &destination_account); +} diff --git a/client-test/Cargo.toml b/client-test/Cargo.toml index 5991b72b130f88..c732b8511e01ee 100644 --- a/client-test/Cargo.toml +++ b/client-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-client-test" -version = "1.9.0" +version = "1.9.4" description = "Solana RPC Test" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,22 +12,24 @@ edition = "2021" [dependencies] serde_json = "1.0.72" serial_test = "0.5.1" -solana-client = { path = "../client", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-test-validator = { path = "../test-validator", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" 
} +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-test-validator = { path = "../test-validator", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } systemstat = "0.1.10" [dev-dependencies] -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client-test/tests/client.rs b/client-test/tests/client.rs index 799cdc41d7a31d..8f0116e106e71f 100644 --- a/client-test/tests/client.rs +++ b/client-test/tests/client.rs @@ -4,11 +4,16 @@ use { solana_client::{ pubsub_client::PubsubClient, rpc_client::RpcClient, - rpc_config::{RpcAccountInfoConfig, RpcProgramAccountsConfig}, - rpc_response::SlotInfo, + rpc_config::{ + RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter, + RpcProgramAccountsConfig, + }, + rpc_response::{RpcBlockUpdate, SlotInfo}, }, + solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}, solana_rpc::{ optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, + rpc::create_test_transactions_and_populate_blockstore, rpc_pubsub_service::{PubSubConfig, PubSubService}, rpc_subscriptions::RpcSubscriptions, }, @@ -20,7 +25,7 @@ use { }, solana_sdk::{ clock::Slot, - commitment_config::CommitmentConfig, + commitment_config::{CommitmentConfig, CommitmentLevel}, native_token::sol_to_lamports, pubkey::Pubkey, rpc_port, @@ -29,11 +34,12 @@ use { }, solana_streamer::socket::SocketAddrSpace, solana_test_validator::TestValidator, + 
solana_transaction_status::{TransactionDetails, UiTransactionEncoding}, std::{ collections::HashSet, net::{IpAddr, SocketAddr}, sync::{ - atomic::{AtomicBool, Ordering}, + atomic::{AtomicBool, AtomicU64, Ordering}, Arc, RwLock, }, thread::sleep, @@ -119,9 +125,10 @@ fn test_account_subscription() { let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bob = Keypair::new(); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -194,6 +201,112 @@ fn test_account_subscription() { assert_eq!(errors, [].to_vec()); } +#[test] +#[serial] +fn test_block_subscription() { + // setup BankForks + let exit = Arc::new(AtomicBool::new(false)); + let GenesisConfigInfo { + genesis_config, + mint_keypair: alice, + .. 
+ } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + + // setup Blockstore + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); + + // populate ledger with test txs + let bank = bank_forks.read().unwrap().working_bank(); + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + let keypair3 = Keypair::new(); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); + let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore( + vec![&alice, &keypair1, &keypair2, &keypair3], + 0, + bank, + blockstore.clone(), + max_complete_transaction_status_slot, + ); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + // setup RpcSubscriptions && PubSubService + let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore( + &exit, + max_complete_transaction_status_slot, + blockstore.clone(), + bank_forks.clone(), + Arc::new(RwLock::new(BlockCommitmentCache::default())), + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), + )); + let pubsub_addr = SocketAddr::new( + IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), + rpc_port::DEFAULT_RPC_PUBSUB_PORT, + ); + let pub_cfg = PubSubConfig { + enable_block_subscription: true, + ..PubSubConfig::default() + }; + let (trigger, pubsub_service) = PubSubService::new(pub_cfg, &subscriptions, pubsub_addr); + + std::thread::sleep(Duration::from_millis(400)); + + // setup PubsubClient + let (mut client, receiver) = PubsubClient::block_subscribe( + &format!("ws://0.0.0.0:{}/", pubsub_addr.port()), + RpcBlockSubscribeFilter::All, + Some(RpcBlockSubscribeConfig { + commitment: Some(CommitmentConfig { + commitment: CommitmentLevel::Confirmed, + }), + encoding: Some(UiTransactionEncoding::Json), + transaction_details: 
Some(TransactionDetails::Signatures), + show_rewards: None, + }), + ) + .unwrap(); + + // trigger Gossip notification + let slot = bank_forks.read().unwrap().highest_slot(); + subscriptions.notify_gossip_subscribers(slot); + let maybe_actual = receiver.recv_timeout(Duration::from_millis(400)); + match maybe_actual { + Ok(actual) => { + let complete_block = blockstore.get_complete_block(slot, false).unwrap(); + let block = complete_block.clone().configure( + UiTransactionEncoding::Json, + TransactionDetails::Signatures, + false, + ); + let expected = RpcBlockUpdate { + slot, + block: Some(block), + err: None, + }; + let block = complete_block.configure( + UiTransactionEncoding::Json, + TransactionDetails::Signatures, + false, + ); + assert_eq!(actual.value.slot, expected.slot); + assert!(block.eq(&actual.value.block.unwrap())); + } + Err(e) => { + eprintln!("unexpected websocket receive timeout"); + assert_eq!(Some(e), None); + } + } + + // cleanup + exit.store(true, Ordering::Relaxed); + trigger.cancel(); + client.shutdown().unwrap(); + pubsub_service.close().unwrap(); +} + #[test] #[serial] fn test_program_subscription() { @@ -215,9 +328,10 @@ fn test_program_subscription() { let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); let bob = Keypair::new(); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -300,9 +414,10 @@ fn test_root_subscription() { let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = 
Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -350,8 +465,10 @@ fn test_slot_subscription() { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, diff --git a/client/Cargo.toml b/client/Cargo.toml index 0a8a39a5adbfe7..d56c7bce49d835 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-client" -version = "1.9.0" +version = "1.9.4" description = "Solana Client" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -23,15 +23,15 @@ semver = "1.0.4" serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = 
"=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } thiserror = "1.0" tokio = { version = "1", features = ["full"] } tungstenite = { version = "0.16.0", features = ["rustls-tls-webpki-roots"] } @@ -40,7 +40,7 @@ url = "2.2.2" [dev-dependencies] assert_matches = "1.5.0" jsonrpc-http-server = "18.0.0" -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/src/pubsub_client.rs b/client/src/pubsub_client.rs index d5b70b5569a999..79b33b6e949b4f 100644 --- a/client/src/pubsub_client.rs +++ b/client/src/pubsub_client.rs @@ -1,12 +1,13 @@ use { crate::{ rpc_config::{ - RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, - RpcTransactionLogsConfig, RpcTransactionLogsFilter, + RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter, + RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, + RpcTransactionLogsFilter, }, rpc_response::{ - Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult, - SlotInfo, SlotUpdate, + Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, RpcLogsResponse, + RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, }, }, log::*, @@ -173,6 +174,12 @@ pub type SignatureSubscription = ( Receiver>, ); +pub type PubsubBlockClientSubscription = PubsubClientSubscription>; +pub type BlockSubscription = ( + PubsubBlockClientSubscription, + Receiver>, +); + pub type PubsubProgramClientSubscription = PubsubClientSubscription>; pub type ProgramSubscription = ( 
PubsubProgramClientSubscription, @@ -185,6 +192,9 @@ pub type AccountSubscription = ( Receiver>, ); +pub type PubsubVoteClientSubscription = PubsubClientSubscription; +pub type VoteSubscription = (PubsubVoteClientSubscription, Receiver); + pub type PubsubRootClientSubscription = PubsubClientSubscription; pub type RootSubscription = (PubsubRootClientSubscription, Receiver); @@ -266,6 +276,45 @@ impl PubsubClient { Ok((result, receiver)) } + pub fn block_subscribe( + url: &str, + filter: RpcBlockSubscribeFilter, + config: Option, + ) -> Result { + let url = Url::parse(url)?; + let socket = connect_with_retry(url)?; + let (sender, receiver) = channel(); + + let socket = Arc::new(RwLock::new(socket)); + let socket_clone = socket.clone(); + let exit = Arc::new(AtomicBool::new(false)); + let exit_clone = exit.clone(); + let body = json!({ + "jsonrpc":"2.0", + "id":1, + "method":"blockSubscribe", + "params":[filter, config] + }) + .to_string(); + + let subscription_id = PubsubBlockClientSubscription::send_subscribe(&socket_clone, body)?; + + let t_cleanup = std::thread::spawn(move || { + Self::cleanup_with_sender(exit_clone, &socket_clone, sender) + }); + + let result = PubsubClientSubscription { + message_type: PhantomData, + operation: "blocks", + socket, + subscription_id, + t_cleanup: Some(t_cleanup), + exit, + }; + + Ok((result, receiver)) + } + pub fn logs_subscribe( url: &str, filter: RpcTransactionLogsFilter, @@ -346,6 +395,39 @@ impl PubsubClient { Ok((result, receiver)) } + pub fn vote_subscribe(url: &str) -> Result { + let url = Url::parse(url)?; + let socket = connect_with_retry(url)?; + let (sender, receiver) = channel(); + + let socket = Arc::new(RwLock::new(socket)); + let socket_clone = socket.clone(); + let exit = Arc::new(AtomicBool::new(false)); + let exit_clone = exit.clone(); + let body = json!({ + "jsonrpc":"2.0", + "id":1, + "method":"voteSubscribe", + }) + .to_string(); + let subscription_id = 
PubsubVoteClientSubscription::send_subscribe(&socket_clone, body)?; + + let t_cleanup = std::thread::spawn(move || { + Self::cleanup_with_sender(exit_clone, &socket_clone, sender) + }); + + let result = PubsubClientSubscription { + message_type: PhantomData, + operation: "vote", + socket, + subscription_id, + t_cleanup: Some(t_cleanup), + exit, + }; + + Ok((result, receiver)) + } + pub fn root_subscribe(url: &str) -> Result { let url = Url::parse(url)?; let socket = connect_with_retry(url)?; diff --git a/client/src/rpc_client.rs b/client/src/rpc_client.rs index afccbb7ba5c915..e36169a85f87ca 100644 --- a/client/src/rpc_client.rs +++ b/client/src/rpc_client.rs @@ -1329,7 +1329,7 @@ impl RpcClient { /// # Ok::<(), ClientError>(()) /// ``` pub fn get_highest_snapshot_slot(&self) -> ClientResult { - if self.get_node_version()? < semver::Version::new(1, 8, 0) { + if self.get_node_version()? < semver::Version::new(1, 9, 0) { #[allow(deprecated)] self.get_snapshot_slot().map(|full| RpcSnapshotSlotInfo { full, @@ -4747,7 +4747,7 @@ impl RpcClient { commitment: CommitmentConfig, ) -> ClientResult<(Hash, u64)> { let (blockhash, last_valid_block_height) = - if self.get_node_version()? < semver::Version::new(1, 8, 0) { + if self.get_node_version()? < semver::Version::new(1, 9, 0) { let Fees { blockhash, last_valid_block_height, @@ -4781,7 +4781,7 @@ impl RpcClient { blockhash: &Hash, commitment: CommitmentConfig, ) -> ClientResult { - let result = if self.get_node_version()? < semver::Version::new(1, 8, 0) { + let result = if self.get_node_version()? < semver::Version::new(1, 9, 0) { self.get_fee_calculator_for_blockhash_with_commitment(blockhash, commitment)? 
.value .is_some() diff --git a/client/src/rpc_config.rs b/client/src/rpc_config.rs index 93d78f47caad29..edf3dc819877d8 100644 --- a/client/src/rpc_config.rs +++ b/client/src/rpc_config.rs @@ -182,6 +182,23 @@ pub struct RpcSignatureSubscribeConfig { pub enable_received_notification: Option, } +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum RpcBlockSubscribeFilter { + All, + MentionsAccountOrProgram(String), +} + +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct RpcBlockSubscribeConfig { + #[serde(flatten)] + pub commitment: Option, + pub encoding: Option, + pub transaction_details: Option, + pub show_rewards: Option, +} + #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct RpcSignaturesForAddressConfig { diff --git a/client/src/rpc_response.rs b/client/src/rpc_response.rs index 81146a1f0daa58..a576c6168e43fd 100644 --- a/client/src/rpc_response.rs +++ b/client/src/rpc_response.rs @@ -9,9 +9,10 @@ use { transaction::{Result, TransactionError}, }, solana_transaction_status::{ - ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, + ConfirmedTransactionStatusWithSignature, TransactionConfirmationStatus, UiConfirmedBlock, }, std::{collections::HashMap, fmt, net::SocketAddr}, + thiserror::Error, }; pub type RpcResult = client_error::Result>; @@ -424,6 +425,20 @@ pub struct RpcInflationReward { pub commission: Option, // Vote account commission when the reward was credited } +#[derive(Clone, Deserialize, Serialize, Debug, Error, Eq, PartialEq)] +pub enum RpcBlockUpdateError { + #[error("block store error")] + BlockStoreError, +} + +#[derive(Serialize, Deserialize, Debug)] +#[serde(rename_all = "camelCase")] +pub struct RpcBlockUpdate { + pub slot: Slot, + pub block: Option, + pub err: Option, +} + impl From for RpcConfirmedTransactionStatusWithSignature { fn 
from(value: ConfirmedTransactionStatusWithSignature) -> Self { let ConfirmedTransactionStatusWithSignature { diff --git a/core/Cargo.toml b/core/Cargo.toml index 05f1e30204e53f..447a674a94a447 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-core" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-core" readme = "../README.md" @@ -26,7 +26,7 @@ fs_extra = "1.2.0" histogram = "0.6.9" itertools = "0.10.1" log = "0.4.14" -lru = "0.7.0" +lru = "0.7.1" rand = "0.7.0" rand_chacha = "0.2.2" raptorq = "1.6.4" @@ -34,30 +34,30 @@ rayon = "1.5.1" retain_mut = "0.1.5" serde = "1.0.130" serde_derive = "1.0.103" -solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-poh = { path = "../poh", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" } -solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } 
-solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-accountsdb-plugin-manager = { path = "../accountsdb-plugin-manager", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-poh = { path = "../poh", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-replica-lib = { path = "../replica-lib", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } tempfile = "3.2.0" thiserror = "1.0" -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } sys-info = "0.9.1" tokio = { version = "1", features = ["full"] } trees = "0.4.2" @@ -71,9 +71,9 @@ matches = "0.1.9" reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } serde_json = 
"1.0.72" serial_test = "0.5.1" -solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } static_assertions = "1.1.0" systemstat = "0.1.10" diff --git a/core/benches/banking_stage.rs b/core/benches/banking_stage.rs index 1b9e79a8568482..dbb0961af114d8 100644 --- a/core/benches/banking_stage.rs +++ b/core/benches/banking_stage.rs @@ -20,7 +20,7 @@ use { genesis_utils::{create_genesis_config, GenesisConfigInfo}, get_tmp_ledger_path, }, - solana_perf::{packet::to_packets_chunked, test_tx::test_tx}, + solana_perf::{packet::to_packet_batches, test_tx::test_tx}, solana_poh::poh_recorder::{create_test_recorder, WorkingBankEntry}, solana_runtime::{bank::Bank, cost_model::CostModel}, solana_sdk::{ @@ -77,11 +77,11 @@ fn bench_consume_buffered(bencher: &mut Bencher) { let tx = test_tx(); let len = 4096; let chunk_size = 1024; - let batches = to_packets_chunked(&vec![tx; len], chunk_size); - let mut packets = VecDeque::new(); + let batches = to_packet_batches(&vec![tx; len], chunk_size); + let mut packet_batches = VecDeque::new(); for batch in batches { let batch_len = batch.packets.len(); - packets.push_back((batch, vec![0usize; batch_len], false)); + packet_batches.push_back((batch, vec![0usize; batch_len], false)); } let (s, _r) = unbounded(); // This tests the performance of buffering packets. 
@@ -91,7 +91,7 @@ fn bench_consume_buffered(bencher: &mut Bencher) { &my_pubkey, std::u128::MAX, &poh_recorder, - &mut packets, + &mut packet_batches, None, &s, None::>, @@ -206,7 +206,7 @@ fn bench_banking(bencher: &mut Bencher, tx_type: TransactionType) { assert!(r.is_ok(), "sanity parallel execution"); } bank.clear_signatures(); - let verified: Vec<_> = to_packets_chunked(&transactions, PACKETS_PER_BATCH); + let verified: Vec<_> = to_packet_batches(&transactions, PACKETS_PER_BATCH); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new( diff --git a/core/benches/retransmit_stage.rs b/core/benches/retransmit_stage.rs index 59afa0ada5db93..3ae1b605989ac3 100644 --- a/core/benches/retransmit_stage.rs +++ b/core/benches/retransmit_stage.rs @@ -100,7 +100,11 @@ fn bench_retransmitter(bencher: &mut Bencher) { let slot = 0; let parent = 0; let shredder = Shredder::new(slot, parent, 0, 0).unwrap(); - let mut data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0; + let (mut data_shreds, _) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); let num_packets = data_shreds.len(); diff --git a/core/benches/shredder.rs b/core/benches/shredder.rs index d632858a8c129f..565f8ced2dda3b 100644 --- a/core/benches/shredder.rs +++ b/core/benches/shredder.rs @@ -40,16 +40,14 @@ fn make_shreds(num_shreds: usize) -> Vec { ); let entries = make_large_unchained_entries(txs_per_entry, num_entries); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); - let data_shreds = shredder - .entries_to_data_shreds( - &Keypair::new(), - &entries, - true, // is_last_in_slot - 0, // next_shred_index - 0, // fec_set_offset - &mut ProcessShredsStats::default(), - ) - .0; + let data_shreds = shredder.entries_to_data_shreds( + &Keypair::new(), + &entries, + true, // is_last_in_slot + 0, // next_shred_index + 0, // fec_set_offset + &mut ProcessShredsStats::default(), + ); assert!(data_shreds.len() 
>= num_shreds); data_shreds } @@ -76,7 +74,7 @@ fn bench_shredder_ticks(bencher: &mut Bencher) { let entries = create_ticks(num_ticks, 0, Hash::default()); bencher.iter(|| { let shredder = Shredder::new(1, 0, 0, 0).unwrap(); - shredder.entries_to_shreds(&kp, &entries, true, 0); + shredder.entries_to_shreds(&kp, &entries, true, 0, 0); }) } @@ -95,7 +93,7 @@ fn bench_shredder_large_entries(bencher: &mut Bencher) { // 1Mb bencher.iter(|| { let shredder = Shredder::new(1, 0, 0, 0).unwrap(); - shredder.entries_to_shreds(&kp, &entries, true, 0); + shredder.entries_to_shreds(&kp, &entries, true, 0, 0); }) } @@ -108,7 +106,7 @@ fn bench_deshredder(bencher: &mut Bencher) { let num_ticks = max_ticks_per_n_shreds(1, Some(shred_size)) * num_shreds as u64; let entries = create_ticks(num_ticks, 0, Hash::default()); let shredder = Shredder::new(1, 0, 0, 0).unwrap(); - let data_shreds = shredder.entries_to_shreds(&kp, &entries, true, 0).0; + let (data_shreds, _) = shredder.entries_to_shreds(&kp, &entries, true, 0, 0); bencher.iter(|| { let raw = &mut Shredder::deshred(&data_shreds).unwrap(); assert_ne!(raw.len(), 0); @@ -135,6 +133,7 @@ fn bench_shredder_coding(bencher: &mut Bencher) { Shredder::generate_coding_shreds( &data_shreds[..symbol_count], true, // is_last_in_slot + 0, // next_code_index ) .len(); }) @@ -147,6 +146,7 @@ fn bench_shredder_decoding(bencher: &mut Bencher) { let coding_shreds = Shredder::generate_coding_shreds( &data_shreds[..symbol_count], true, // is_last_in_slot + 0, // next_code_index ); bencher.iter(|| { Shredder::try_recovery(coding_shreds[..].to_vec()).unwrap(); diff --git a/core/benches/sigverify_stage.rs b/core/benches/sigverify_stage.rs index e48ab9301c1690..6755db61eb9b02 100644 --- a/core/benches/sigverify_stage.rs +++ b/core/benches/sigverify_stage.rs @@ -8,7 +8,7 @@ use { log::*, rand::{thread_rng, Rng}, solana_core::{sigverify::TransactionSigVerifier, sigverify_stage::SigVerifyStage}, - solana_perf::{packet::to_packets_chunked, 
test_tx::test_tx}, + solana_perf::{packet::to_packet_batches, test_tx::test_tx}, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, @@ -28,7 +28,7 @@ fn bench_packet_discard(bencher: &mut Bencher) { let len = 30 * 1000; let chunk_size = 1024; let tx = test_tx(); - let mut batches = to_packets_chunked(&vec![tx; len], chunk_size); + let mut batches = to_packet_batches(&vec![tx; len], chunk_size); let mut total = 0; @@ -37,7 +37,7 @@ fn bench_packet_discard(bencher: &mut Bencher) { .map(|_| { let mut addr = [0u16; 8]; thread_rng().fill(&mut addr); - addr + std::net::IpAddr::from(addr) }) .collect(); @@ -54,7 +54,7 @@ fn bench_packet_discard(bencher: &mut Bencher) { SigVerifyStage::discard_excess_packets(&mut batches, 10_000); for batch in batches.iter_mut() { for p in batch.packets.iter_mut() { - p.meta.discard = false; + p.meta.set_discard(false); } } }); @@ -74,7 +74,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) { let chunk_size = 1024; let mut batches = if use_same_tx { let tx = test_tx(); - to_packets_chunked(&vec![tx; len], chunk_size) + to_packet_batches(&vec![tx; len], chunk_size) } else { let from_keypair = Keypair::new(); let to_keypair = Keypair::new(); @@ -89,7 +89,7 @@ fn bench_sigverify_stage(bencher: &mut Bencher) { ) }) .collect(); - to_packets_chunked(&txs, chunk_size) + to_packet_batches(&txs, chunk_size) }; trace!( diff --git a/core/src/ancestor_hashes_service.rs b/core/src/ancestor_hashes_service.rs index c824036cd85fed..9ce9a5efb2d78e 100644 --- a/core/src/ancestor_hashes_service.rs +++ b/core/src/ancestor_hashes_service.rs @@ -14,7 +14,7 @@ use { solana_ledger::{blockstore::Blockstore, shred::SIZE_OF_NONCE}, solana_measure::measure::Measure, solana_perf::{ - packet::{limited_deserialize, Packet, Packets}, + packet::{limited_deserialize, Packet, PacketBatch}, recycler::Recycler, }, solana_runtime::bank::Bank, @@ -23,7 +23,7 @@ use { pubkey::Pubkey, timing::timestamp, }, - solana_streamer::streamer::{self, PacketReceiver}, + 
solana_streamer::streamer::{self, PacketBatchReceiver}, std::{ collections::HashSet, net::UdpSocket, @@ -197,7 +197,7 @@ impl AncestorHashesService { /// Listen for responses to our ancestors hashes repair requests fn run_responses_listener( ancestor_hashes_request_statuses: Arc>, - response_receiver: PacketReceiver, + response_receiver: PacketBatchReceiver, blockstore: Arc, outstanding_requests: Arc>, exit: Arc, @@ -240,7 +240,7 @@ impl AncestorHashesService { /// Process messages from the network fn process_new_packets_from_channel( ancestor_hashes_request_statuses: &DashMap, - response_receiver: &PacketReceiver, + response_receiver: &PacketBatchReceiver, blockstore: &Blockstore, outstanding_requests: &RwLock, stats: &mut AncestorHashesResponsesStats, @@ -249,17 +249,17 @@ impl AncestorHashesService { retryable_slots_sender: &RetryableSlotsSender, ) -> Result<()> { let timeout = Duration::new(1, 0); - let mut responses = vec![response_receiver.recv_timeout(timeout)?]; - let mut total_packets = responses[0].packets.len(); + let mut packet_batches = vec![response_receiver.recv_timeout(timeout)?]; + let mut total_packets = packet_batches[0].packets.len(); let mut dropped_packets = 0; - while let Ok(more) = response_receiver.try_recv() { - total_packets += more.packets.len(); + while let Ok(batch) = response_receiver.try_recv() { + total_packets += batch.packets.len(); if total_packets < *max_packets { // Drop the rest in the channel in case of DOS - responses.push(more); + packet_batches.push(batch); } else { - dropped_packets += more.packets.len(); + dropped_packets += batch.packets.len(); } } @@ -267,10 +267,10 @@ impl AncestorHashesService { stats.total_packets += total_packets; let mut time = Measure::start("ancestor_hashes::handle_packets"); - for response in responses { - Self::process_single_packets( + for packet_batch in packet_batches { + Self::process_packet_batch( ancestor_hashes_request_statuses, - response, + packet_batch, stats, outstanding_requests, 
blockstore, @@ -289,16 +289,16 @@ impl AncestorHashesService { Ok(()) } - fn process_single_packets( + fn process_packet_batch( ancestor_hashes_request_statuses: &DashMap, - packets: Packets, + packet_batch: PacketBatch, stats: &mut AncestorHashesResponsesStats, outstanding_requests: &RwLock, blockstore: &Blockstore, duplicate_slots_reset_sender: &DuplicateSlotsResetSender, retryable_slots_sender: &RetryableSlotsSender, ) { - packets.packets.iter().for_each(|packet| { + packet_batch.packets.iter().for_each(|packet| { let decision = Self::verify_and_process_ancestor_response( packet, ancestor_hashes_request_statuses, @@ -328,7 +328,7 @@ impl AncestorHashesService { blockstore: &Blockstore, ) -> Option<(Slot, DuplicateAncestorDecision)> { let from_addr = packet.meta.addr(); - limited_deserialize(&packet.data[..packet.meta.size - SIZE_OF_NONCE]) + limited_deserialize(&packet.data[..packet.meta.size.saturating_sub(SIZE_OF_NONCE)]) .ok() .and_then(|ancestor_hashes_response| { // Verify the response @@ -871,7 +871,7 @@ mod test { t_listen: JoinHandle<()>, exit: Arc, responder_info: ContactInfo, - response_receiver: PacketReceiver, + response_receiver: PacketBatchReceiver, correct_bank_hashes: HashMap, } @@ -1033,15 +1033,6 @@ mod test { is_frozen, ); - /*{ - let w_bank_forks = bank_forks.write().unwrap(); - assert!(w_bank_forks.get(dead_slot).is_none()); - let parent = w_bank_forks.get(dead_slot - 1).unwrap().clone(); - let dead_bank = Bank::new_from_parent(&parent, &Pubkey::default(), dead_slot); - bank_forks.insert(dead_bank); - - }*/ - // Create slots [slot, slot + num_ancestors) with 5 shreds apiece let (shreds, _) = make_many_slot_entries(dead_slot, dead_slot, 5); blockstore @@ -1369,6 +1360,34 @@ mod test { assert!(ancestor_hashes_request_statuses.is_empty()); } + #[test] + fn test_verify_and_process_ancestor_responses_invalid_packet() { + let bank0 = Bank::default_for_tests(); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0))); + + let 
ManageAncestorHashesState { + ancestor_hashes_request_statuses, + outstanding_requests, + .. + } = ManageAncestorHashesState::new(bank_forks); + + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + + // Create invalid packet with fewer bytes than the size of the nonce + let mut packet = Packet::default(); + packet.meta.size = 0; + + assert!(AncestorHashesService::verify_and_process_ancestor_response( + &packet, + &ancestor_hashes_request_statuses, + &mut AncestorHashesResponsesStats::default(), + &outstanding_requests, + &blockstore, + ) + .is_none()); + } + #[test] fn test_ancestor_hashes_service_manage_ancestor_hashes_after_replay_dump() { let dead_slot = MAX_ANCESTOR_RESPONSES as Slot; diff --git a/core/src/banking_stage.rs b/core/src/banking_stage.rs index d1c7e5eb086eec..2fd74d6a8b1656 100644 --- a/core/src/banking_stage.rs +++ b/core/src/banking_stage.rs @@ -15,7 +15,7 @@ use { solana_perf::{ cuda_runtime::PinnedVec, data_budget::DataBudget, - packet::{limited_deserialize, Packet, Packets, PACKETS_PER_BATCH}, + packet::{limited_deserialize, Packet, PacketBatch, PACKETS_PER_BATCH}, perf_libs, }, solana_poh::poh_recorder::{BankStart, PohRecorder, PohRecorderError, TransactionRecorder}, @@ -64,10 +64,10 @@ use { }; /// (packets, valid_indexes, forwarded) -/// Set of packets with a list of which are valid and if this batch has been forwarded. -type PacketsAndOffsets = (Packets, Vec, bool); +/// Batch of packets with a list of which are valid and if this batch has been forwarded. 
+type PacketBatchAndOffsets = (PacketBatch, Vec, bool); -pub type UnprocessedPackets = VecDeque; +pub type UnprocessedPacketBatches = VecDeque; /// Transaction forwarding pub const FORWARD_TRANSACTIONS_TO_LEADER_AT_SLOT_OFFSET: u64 = 2; @@ -119,7 +119,42 @@ impl BankingStageStats { } } + fn is_empty(&self) -> bool { + 0 == self.process_packets_count.load(Ordering::Relaxed) as u64 + + self.new_tx_count.load(Ordering::Relaxed) as u64 + + self.dropped_packet_batches_count.load(Ordering::Relaxed) as u64 + + self.dropped_packets_count.load(Ordering::Relaxed) as u64 + + self + .dropped_duplicated_packets_count + .load(Ordering::Relaxed) as u64 + + self.newly_buffered_packets_count.load(Ordering::Relaxed) as u64 + + self.current_buffered_packets_count.load(Ordering::Relaxed) as u64 + + self + .current_buffered_packet_batches_count + .load(Ordering::Relaxed) as u64 + + self.rebuffered_packets_count.load(Ordering::Relaxed) as u64 + + self.consumed_buffered_packets_count.load(Ordering::Relaxed) as u64 + + self + .consume_buffered_packets_elapsed + .load(Ordering::Relaxed) + + self.process_packets_elapsed.load(Ordering::Relaxed) + + self + .handle_retryable_packets_elapsed + .load(Ordering::Relaxed) + + self.filter_pending_packets_elapsed.load(Ordering::Relaxed) + + self.packet_duplicate_check_elapsed.load(Ordering::Relaxed) + + self.packet_conversion_elapsed.load(Ordering::Relaxed) + + self + .unprocessed_packet_conversion_elapsed + .load(Ordering::Relaxed) + + self.transaction_processing_elapsed.load(Ordering::Relaxed) + } + fn report(&self, report_interval_ms: u64) { + // skip repoting metrics if stats is empty + if self.is_empty() { + return; + } if self.last_report.should_update(report_interval_ms) { datapoint_info!( "banking_stage-loop-stats", @@ -255,9 +290,9 @@ impl BankingStage { pub fn new( cluster_info: &Arc, poh_recorder: &Arc>, - verified_receiver: CrossbeamReceiver>, - tpu_verified_vote_receiver: CrossbeamReceiver>, - verified_vote_receiver: CrossbeamReceiver>, 
+ verified_receiver: CrossbeamReceiver>, + tpu_verified_vote_receiver: CrossbeamReceiver>, + verified_vote_receiver: CrossbeamReceiver>, transaction_status_sender: Option, gossip_vote_sender: ReplayVoteSender, cost_model: Arc>, @@ -278,9 +313,9 @@ impl BankingStage { fn new_num_threads( cluster_info: &Arc, poh_recorder: &Arc>, - verified_receiver: CrossbeamReceiver>, - tpu_verified_vote_receiver: CrossbeamReceiver>, - verified_vote_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, + tpu_verified_vote_receiver: CrossbeamReceiver>, + verified_vote_receiver: CrossbeamReceiver>, num_threads: u32, transaction_status_sender: Option, gossip_vote_sender: ReplayVoteSender, @@ -346,12 +381,12 @@ impl BankingStage { } fn filter_valid_packets_for_forwarding<'a>( - all_packets: impl Iterator, + packet_batches: impl Iterator, ) -> Vec<&'a Packet> { - all_packets - .filter(|(_p, _indexes, forwarded)| !forwarded) - .flat_map(|(p, valid_indexes, _forwarded)| { - valid_indexes.iter().map(move |x| &p.packets[*x]) + packet_batches + .filter(|(_batch, _indexes, forwarded)| !forwarded) + .flat_map(|(batch, valid_indexes, _forwarded)| { + valid_indexes.iter().map(move |x| &batch.packets[*x]) }) .collect() } @@ -359,33 +394,44 @@ impl BankingStage { fn forward_buffered_packets( socket: &std::net::UdpSocket, tpu_forwards: &std::net::SocketAddr, - unprocessed_packets: &UnprocessedPackets, + buffered_packet_batches: &UnprocessedPacketBatches, data_budget: &DataBudget, ) -> std::io::Result<()> { - let packets = Self::filter_valid_packets_for_forwarding(unprocessed_packets.iter()); - inc_new_counter_info!("banking_stage-forwarded_packets", packets.len()); + let packets = Self::filter_valid_packets_for_forwarding(buffered_packet_batches.iter()); const INTERVAL_MS: u64 = 100; const MAX_BYTES_PER_SECOND: usize = 10_000 * 1200; const MAX_BYTES_PER_INTERVAL: usize = MAX_BYTES_PER_SECOND * INTERVAL_MS as usize / 1000; const MAX_BYTES_BUDGET: usize = MAX_BYTES_PER_INTERVAL * 5; 
data_budget.update(INTERVAL_MS, |bytes| { - std::cmp::min(bytes + MAX_BYTES_PER_INTERVAL, MAX_BYTES_BUDGET) + std::cmp::min( + bytes.saturating_add(MAX_BYTES_PER_INTERVAL), + MAX_BYTES_BUDGET, + ) }); - let mut packet_vec = Vec::with_capacity(packets.len()); - for p in packets { - if data_budget.take(p.meta.size) { - packet_vec.push((&p.data[..p.meta.size], tpu_forwards)); + let packet_vec: Vec<_> = packets + .iter() + .filter_map(|p| { + if !p.meta.forwarded() && data_budget.take(p.meta.size) { + Some((&p.data[..p.meta.size], tpu_forwards)) + } else { + None + } + }) + .collect(); + + if !packet_vec.is_empty() { + inc_new_counter_info!("banking_stage-forwarded_packets", packet_vec.len()); + if let Err(SendPktsError::IoError(ioerr, _num_failed)) = batch_send(socket, &packet_vec) + { + return Err(ioerr); } } - if let Err(SendPktsError::IoError(ioerr, _num_failed)) = batch_send(socket, &packet_vec) { - return Err(ioerr); - } Ok(()) } - // Returns whether the given `Packets` has any more remaining unprocessed + // Returns whether the given `PacketBatch` has any more remaining unprocessed // transactions fn update_buffered_packets_with_new_unprocessed( original_unprocessed_indexes: &mut Vec, @@ -404,7 +450,7 @@ impl BankingStage { my_pubkey: &Pubkey, max_tx_ingestion_ns: u128, poh_recorder: &Arc>, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, test_fn: Option, @@ -412,19 +458,21 @@ impl BankingStage { recorder: &TransactionRecorder, qos_service: &Arc, ) { - let mut rebuffered_packets_len = 0; + let mut rebuffered_packet_count = 0; let mut new_tx_count = 0; - let buffered_len = buffered_packets.len(); + let buffered_packet_batches_len = buffered_packet_batches.len(); let mut proc_start = Measure::start("consume_buffered_process"); let mut reached_end_of_slot = None; - buffered_packets.retain_mut(|(msgs, ref mut original_unprocessed_indexes, 
_forwarded)| { + buffered_packet_batches.retain_mut(|buffered_packet_batch_and_offsets| { + let (packet_batch, ref mut original_unprocessed_indexes, _forwarded) = + buffered_packet_batch_and_offsets; if let Some((next_leader, bank)) = &reached_end_of_slot { // We've hit the end of this slot, no need to perform more processing, // just filter the remaining packets for the invalid (e.g. too old) ones let new_unprocessed_indexes = Self::filter_unprocessed_packets( bank, - msgs, + packet_batch, original_unprocessed_indexes, my_pubkey, *next_leader, @@ -446,7 +494,7 @@ impl BankingStage { &working_bank, &bank_creation_time, recorder, - msgs, + packet_batch, original_unprocessed_indexes.to_owned(), transaction_status_sender.clone(), gossip_vote_sender, @@ -467,7 +515,7 @@ impl BankingStage { new_tx_count += processed; // Out of the buffered packets just retried, collect any still unprocessed // transactions in this batch for forwarding - rebuffered_packets_len += new_unprocessed_indexes.len(); + rebuffered_packet_count += new_unprocessed_indexes.len(); let has_more_unprocessed_transactions = Self::update_buffered_packets_with_new_unprocessed( original_unprocessed_indexes, @@ -478,7 +526,7 @@ impl BankingStage { } has_more_unprocessed_transactions } else { - rebuffered_packets_len += original_unprocessed_indexes.len(); + rebuffered_packet_count += original_unprocessed_indexes.len(); // `original_unprocessed_indexes` must have remaining packets to process // if not yet processed. 
assert!(Self::packet_has_more_unprocessed_transactions( @@ -494,7 +542,7 @@ impl BankingStage { debug!( "@{:?} done processing buffered batches: {} time: {:?}ms tx count: {} tx/s: {}", timestamp(), - buffered_len, + buffered_packet_batches_len, proc_start.as_ms(), new_tx_count, (new_tx_count as f32) / (proc_start.as_s()) @@ -505,7 +553,7 @@ impl BankingStage { .fetch_add(proc_start.as_us(), Ordering::Relaxed); banking_stage_stats .rebuffered_packets_count - .fetch_add(rebuffered_packets_len, Ordering::Relaxed); + .fetch_add(rebuffered_packet_count, Ordering::Relaxed); banking_stage_stats .consumed_buffered_packets_count .fetch_add(new_tx_count, Ordering::Relaxed); @@ -550,7 +598,7 @@ impl BankingStage { socket: &std::net::UdpSocket, poh_recorder: &Arc>, cluster_info: &ClusterInfo, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, forward_option: &ForwardOption, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -592,7 +640,7 @@ impl BankingStage { my_pubkey, max_tx_ingestion_ns, poh_recorder, - buffered_packets, + buffered_packet_batches, transaction_status_sender, gossip_vote_sender, None::>, @@ -605,7 +653,7 @@ impl BankingStage { Self::handle_forwarding( forward_option, cluster_info, - buffered_packets, + buffered_packet_batches, poh_recorder, socket, false, @@ -616,7 +664,7 @@ impl BankingStage { Self::handle_forwarding( forward_option, cluster_info, - buffered_packets, + buffered_packet_batches, poh_recorder, socket, true, @@ -631,7 +679,7 @@ impl BankingStage { fn handle_forwarding( forward_option: &ForwardOption, cluster_info: &ClusterInfo, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, poh_recorder: &Arc>, socket: &UdpSocket, hold: bool, @@ -640,7 +688,7 @@ impl BankingStage { let addr = match forward_option { ForwardOption::NotForward => { if !hold { - buffered_packets.clear(); + buffered_packet_batches.clear(); } return; } 
@@ -653,20 +701,20 @@ impl BankingStage { Some(addr) => addr, None => return, }; - let _ = Self::forward_buffered_packets(socket, &addr, buffered_packets, data_budget); + let _ = Self::forward_buffered_packets(socket, &addr, buffered_packet_batches, data_budget); if hold { - buffered_packets.retain(|(_, index, _)| !index.is_empty()); - for (_, _, forwarded) in buffered_packets.iter_mut() { + buffered_packet_batches.retain(|(_, index, _)| !index.is_empty()); + for (_, _, forwarded) in buffered_packet_batches.iter_mut() { *forwarded = true; } } else { - buffered_packets.clear(); + buffered_packet_batches.clear(); } } #[allow(clippy::too_many_arguments)] fn process_loop( - verified_receiver: &CrossbeamReceiver>, + verified_receiver: &CrossbeamReceiver>, poh_recorder: &Arc>, cluster_info: &ClusterInfo, recv_start: &mut Instant, @@ -681,17 +729,17 @@ impl BankingStage { ) { let recorder = poh_recorder.lock().unwrap().recorder(); let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let mut buffered_packets = VecDeque::with_capacity(batch_limit); + let mut buffered_packet_batches = VecDeque::with_capacity(batch_limit); let banking_stage_stats = BankingStageStats::new(id); loop { let my_pubkey = cluster_info.id(); - while !buffered_packets.is_empty() { + while !buffered_packet_batches.is_empty() { let decision = Self::process_buffered_packets( &my_pubkey, &socket, poh_recorder, cluster_info, - &mut buffered_packets, + &mut buffered_packet_batches, &forward_option, transaction_status_sender.clone(), &gossip_vote_sender, @@ -709,7 +757,7 @@ impl BankingStage { } } - let recv_timeout = if !buffered_packets.is_empty() { + let recv_timeout = if !buffered_packet_batches.is_empty() { // If packets are buffered, let's wait for less time on recv from the channel. 
// This helps detect the next leader faster, and processing the buffered // packets quickly @@ -729,7 +777,7 @@ impl BankingStage { batch_limit, transaction_status_sender.clone(), &gossip_vote_sender, - &mut buffered_packets, + &mut buffered_packet_batches, &banking_stage_stats, duplicates, &recorder, @@ -756,22 +804,23 @@ impl BankingStage { fn record_transactions( bank_slot: Slot, txs: &[SanitizedTransaction], - results: &[TransactionExecutionResult], + execution_results: &[TransactionExecutionResult], recorder: &TransactionRecorder, ) -> (Result, Vec) { let mut processed_generation = Measure::start("record::process_generation"); - let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = results - .iter() - .zip(txs) - .enumerate() - .filter_map(|(i, ((r, _n), tx))| { - if Bank::can_commit(r) { - Some((tx.to_versioned_transaction(), i)) - } else { - None - } - }) - .unzip(); + let (processed_transactions, processed_transactions_indexes): (Vec<_>, Vec<_>) = + execution_results + .iter() + .zip(txs) + .enumerate() + .filter_map(|(i, (execution_result, tx))| { + if execution_result.was_executed() { + Some((tx.to_versioned_transaction(), i)) + } else { + None + } + }) + .unzip(); processed_generation.stop(); let num_to_commit = processed_transactions.len(); @@ -837,28 +886,25 @@ impl BankingStage { }; let mut execute_timings = ExecuteTimings::default(); - let ( - mut loaded_accounts, - results, - inner_instructions, - transaction_logs, - mut retryable_txs, - tx_count, - signature_count, - ) = bank.load_and_execute_transactions( - batch, - MAX_PROCESSING_AGE, - transaction_status_sender.is_some(), - transaction_status_sender.is_some(), - &mut execute_timings, - ); + let (mut loaded_accounts, execution_results, mut retryable_txs, tx_count, signature_count) = + bank.load_and_execute_transactions( + batch, + MAX_PROCESSING_AGE, + transaction_status_sender.is_some(), + transaction_status_sender.is_some(), + &mut execute_timings, + ); 
load_execute_time.stop(); let freeze_lock = bank.freeze_lock(); let mut record_time = Measure::start("record_time"); - let (num_to_commit, retryable_record_txs) = - Self::record_transactions(bank.slot(), batch.sanitized_transactions(), &results, poh); + let (num_to_commit, retryable_record_txs) = Self::record_transactions( + bank.slot(), + batch.sanitized_transactions(), + &execution_results, + poh, + ); inc_new_counter_info!( "banking_stage-record_transactions_num_to_commit", *num_to_commit.as_ref().unwrap_or(&0) @@ -880,7 +926,7 @@ impl BankingStage { let tx_results = bank.commit_transactions( sanitized_txs, &mut loaded_accounts, - &results, + execution_results, tx_count, signature_count, &mut execute_timings, @@ -897,8 +943,6 @@ impl BankingStage { tx_results.execution_results, TransactionBalancesSet::new(pre_balances, post_balances), TransactionTokenBalancesSet::new(pre_token_balances, post_token_balances), - inner_instructions, - transaction_logs, tx_results.rent_debits, ); } @@ -933,8 +977,7 @@ impl BankingStage { gossip_vote_sender: &ReplayVoteSender, qos_service: &Arc, ) -> (Result, Vec) { - let tx_costs = - qos_service.compute_transaction_costs(txs.iter(), bank.demote_program_write_locks()); + let tx_costs = qos_service.compute_transaction_costs(txs.iter()); let transactions_qos_results = qos_service.select_transactions_per_cost(txs.iter(), tx_costs.iter(), bank); @@ -1076,7 +1119,7 @@ impl BankingStage { // with their packet indexes. 
#[allow(clippy::needless_collect)] fn transactions_from_packets( - msgs: &Packets, + packet_batch: &PacketBatch, transaction_indexes: &[usize], feature_set: &Arc, votes_only: bool, @@ -1084,8 +1127,8 @@ impl BankingStage { transaction_indexes .iter() .filter_map(|tx_index| { - let p = &msgs.packets[*tx_index]; - if votes_only && !p.meta.is_simple_vote_tx { + let p = &packet_batch.packets[*tx_index]; + if votes_only && !p.meta.is_simple_vote_tx() { return None; } @@ -1095,7 +1138,7 @@ impl BankingStage { let tx = SanitizedTransaction::try_create( tx, message_hash, - Some(p.meta.is_simple_vote_tx), + Some(p.meta.is_simple_vote_tx()), |_| Err(TransactionError::UnsupportedVersion), ) .ok()?; @@ -1149,7 +1192,7 @@ impl BankingStage { bank: &Arc, bank_creation_time: &Instant, poh: &TransactionRecorder, - msgs: &Packets, + packet_batch: &PacketBatch, packet_indexes: Vec, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, @@ -1158,7 +1201,7 @@ impl BankingStage { ) -> (usize, usize, Vec) { let mut packet_conversion_time = Measure::start("packet_conversion"); let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( - msgs, + packet_batch, &packet_indexes, &bank.feature_set, bank.vote_only_bank(), @@ -1214,7 +1257,7 @@ impl BankingStage { fn filter_unprocessed_packets( bank: &Arc, - msgs: &Packets, + packet_batch: &PacketBatch, transaction_indexes: &[usize], my_pubkey: &Pubkey, next_leader: Option, @@ -1232,7 +1275,7 @@ impl BankingStage { let mut unprocessed_packet_conversion_time = Measure::start("unprocessed_packet_conversion"); let (transactions, transaction_to_packet_indexes) = Self::transactions_from_packets( - msgs, + packet_batch, transaction_indexes, &bank.feature_set, bank.vote_only_bank(), @@ -1266,15 +1309,8 @@ impl BankingStage { fn generate_packet_indexes(vers: &PinnedVec) -> Vec { vers.iter() .enumerate() - .filter_map( - |(index, ver)| { - if !ver.meta.discard { - Some(index) - } else { - None - } - }, - ) + 
.filter(|(_, pkt)| !pkt.meta.discard()) + .map(|(index, _)| index) .collect() } @@ -1282,7 +1318,7 @@ impl BankingStage { /// Process the incoming packets fn process_packets( my_pubkey: &Pubkey, - verified_receiver: &CrossbeamReceiver>, + verified_receiver: &CrossbeamReceiver>, poh: &Arc>, recv_start: &mut Instant, recv_timeout: Duration, @@ -1290,41 +1326,41 @@ impl BankingStage { batch_limit: usize, transaction_status_sender: Option, gossip_vote_sender: &ReplayVoteSender, - buffered_packets: &mut UnprocessedPackets, + buffered_packet_batches: &mut UnprocessedPacketBatches, banking_stage_stats: &BankingStageStats, duplicates: &Arc, PacketHasher)>>, recorder: &TransactionRecorder, qos_service: &Arc, ) -> Result<(), RecvTimeoutError> { let mut recv_time = Measure::start("process_packets_recv"); - let mms = verified_receiver.recv_timeout(recv_timeout)?; + let packet_batches = verified_receiver.recv_timeout(recv_timeout)?; recv_time.stop(); - let mms_len = mms.len(); - let count: usize = mms.iter().map(|x| x.packets.len()).sum(); + let packet_batches_len = packet_batches.len(); + let packet_count: usize = packet_batches.iter().map(|x| x.packets.len()).sum(); debug!( "@{:?} process start stalled for: {:?}ms txs: {} id: {}", timestamp(), duration_as_ms(&recv_start.elapsed()), - count, + packet_count, id, ); - inc_new_counter_debug!("banking_stage-transactions_received", count); + inc_new_counter_debug!("banking_stage-transactions_received", packet_count); let mut proc_start = Measure::start("process_packets_transactions_process"); let mut new_tx_count = 0; - let mut mms_iter = mms.into_iter(); + let mut packet_batch_iter = packet_batches.into_iter(); let mut dropped_packets_count = 0; let mut dropped_packet_batches_count = 0; let mut newly_buffered_packets_count = 0; - while let Some(msgs) = mms_iter.next() { - let packet_indexes = Self::generate_packet_indexes(&msgs.packets); + while let Some(packet_batch) = packet_batch_iter.next() { + let packet_indexes = 
Self::generate_packet_indexes(&packet_batch.packets); let poh_recorder_bank = poh.lock().unwrap().get_poh_recorder_bank(); let working_bank_start = poh_recorder_bank.working_bank_start(); if PohRecorder::get_working_bank_if_not_expired(&working_bank_start).is_none() { Self::push_unprocessed( - buffered_packets, - msgs, + buffered_packet_batches, + packet_batch, packet_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -1347,7 +1383,7 @@ impl BankingStage { working_bank, bank_creation_time, recorder, - &msgs, + &packet_batch, packet_indexes, transaction_status_sender.clone(), gossip_vote_sender, @@ -1359,8 +1395,8 @@ impl BankingStage { // Collect any unprocessed transactions in this batch for forwarding Self::push_unprocessed( - buffered_packets, - msgs, + buffered_packet_batches, + packet_batch, unprocessed_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -1376,19 +1412,19 @@ impl BankingStage { let next_leader = poh.lock().unwrap().next_slot_leader(); // Walk thru rest of the transactions and filter out the invalid (e.g. 
too old) ones #[allow(clippy::while_let_on_iterator)] - while let Some(msgs) = mms_iter.next() { - let packet_indexes = Self::generate_packet_indexes(&msgs.packets); + while let Some(packet_batch) = packet_batch_iter.next() { + let packet_indexes = Self::generate_packet_indexes(&packet_batch.packets); let unprocessed_indexes = Self::filter_unprocessed_packets( working_bank, - &msgs, + &packet_batch, &packet_indexes, my_pubkey, next_leader, banking_stage_stats, ); Self::push_unprocessed( - buffered_packets, - msgs, + buffered_packet_batches, + packet_batch, unprocessed_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -1409,11 +1445,11 @@ impl BankingStage { debug!( "@{:?} done processing transaction batches: {} time: {:?}ms tx count: {} tx/s: {} total count: {} id: {}", timestamp(), - mms_len, + packet_batches_len, proc_start.as_ms(), new_tx_count, (new_tx_count as f32) / (proc_start.as_s()), - count, + packet_count, id, ); banking_stage_stats @@ -1421,7 +1457,7 @@ impl BankingStage { .fetch_add(proc_start.as_us(), Ordering::Relaxed); banking_stage_stats .process_packets_count - .fetch_add(count, Ordering::Relaxed); + .fetch_add(packet_count, Ordering::Relaxed); banking_stage_stats .new_tx_count .fetch_add(new_tx_count, Ordering::Relaxed); @@ -1436,9 +1472,12 @@ impl BankingStage { .fetch_add(newly_buffered_packets_count, Ordering::Relaxed); banking_stage_stats .current_buffered_packet_batches_count - .swap(buffered_packets.len(), Ordering::Relaxed); + .swap(buffered_packet_batches.len(), Ordering::Relaxed); banking_stage_stats.current_buffered_packets_count.swap( - buffered_packets.iter().map(|packets| packets.1.len()).sum(), + buffered_packet_batches + .iter() + .map(|packets| packets.1.len()) + .sum(), Ordering::Relaxed, ); *recv_start = Instant::now(); @@ -1446,8 +1485,8 @@ impl BankingStage { } fn push_unprocessed( - unprocessed_packets: &mut UnprocessedPackets, - packets: Packets, + unprocessed_packet_batches: &mut 
UnprocessedPacketBatches, + packet_batch: PacketBatch, mut packet_indexes: Vec, dropped_packet_batches_count: &mut usize, dropped_packets_count: &mut usize, @@ -1462,7 +1501,7 @@ impl BankingStage { let mut duplicates = duplicates.lock().unwrap(); let (cache, hasher) = duplicates.deref_mut(); packet_indexes.retain(|i| { - let packet_hash = hasher.hash_packet(&packets.packets[*i]); + let packet_hash = hasher.hash_packet(&packet_batch.packets[*i]); match cache.get_mut(&packet_hash) { Some(_hash) => false, None => { @@ -1483,14 +1522,14 @@ impl BankingStage { ); } if Self::packet_has_more_unprocessed_transactions(&packet_indexes) { - if unprocessed_packets.len() >= batch_limit { + if unprocessed_packet_batches.len() >= batch_limit { *dropped_packet_batches_count += 1; - if let Some(dropped_batch) = unprocessed_packets.pop_front() { + if let Some(dropped_batch) = unprocessed_packet_batches.pop_front() { *dropped_packets_count += dropped_batch.1.len(); } } *newly_buffered_packets_count += packet_indexes.len(); - unprocessed_packets.push_back((packets, packet_indexes, false)); + unprocessed_packet_batches.push_back((packet_batch, packet_indexes, false)); } } @@ -1560,12 +1599,13 @@ mod tests { get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, }, - solana_perf::packet::to_packets_chunked, + solana_perf::packet::{to_packet_batches, PacketFlags}, solana_poh::{ poh_recorder::{create_test_recorder, Record, WorkingBankEntry}, poh_service::PohService, }, solana_rpc::transaction_status_service::TransactionStatusService, + solana_runtime::bank::TransactionExecutionDetails, solana_sdk::{ hash::Hash, instruction::InstructionError, @@ -1575,7 +1615,7 @@ mod tests { system_transaction, transaction::{Transaction, TransactionError}, }, - solana_streamer::socket::SocketAddrSpace, + solana_streamer::{recvmmsg::recv_mmsg, socket::SocketAddrSpace}, solana_transaction_status::TransactionWithStatusMeta, solana_vote_program::vote_transaction, std::{ @@ -1597,6 +1637,15 @@ mod 
tests { ) } + fn new_execution_result(status: Result<(), TransactionError>) -> TransactionExecutionResult { + TransactionExecutionResult::Executed(TransactionExecutionDetails { + status, + log_messages: None, + inner_instructions: None, + durable_nonce_fee: None, + }) + } + #[test] fn test_banking_stage_shutdown1() { let genesis_config = create_genesis_config(2).genesis_config; @@ -1697,12 +1746,14 @@ mod tests { Blockstore::destroy(&ledger_path).unwrap(); } - pub fn convert_from_old_verified(mut with_vers: Vec<(Packets, Vec)>) -> Vec { + pub fn convert_from_old_verified( + mut with_vers: Vec<(PacketBatch, Vec)>, + ) -> Vec { with_vers.iter_mut().for_each(|(b, v)| { b.packets .iter_mut() .zip(v) - .for_each(|(p, f)| p.meta.discard = *f == 0) + .for_each(|(p, f)| p.meta.set_discard(*f == 0)) }); with_vers.into_iter().map(|(b, _)| b).collect() } @@ -1769,18 +1820,18 @@ mod tests { let tx_anf = system_transaction::transfer(&keypair, &to3, 1, start_hash); // send 'em over - let packets = to_packets_chunked(&[tx_no_ver, tx_anf, tx], 3); + let packet_batches = to_packet_batches(&[tx_no_ver, tx_anf, tx], 3); // glad they all fit - assert_eq!(packets.len(), 1); + assert_eq!(packet_batches.len(), 1); - let packets = packets + let packet_batches = packet_batches .into_iter() - .map(|packets| (packets, vec![0u8, 1u8, 1u8])) + .map(|batch| (batch, vec![0u8, 1u8, 1u8])) .collect(); - let packets = convert_from_old_verified(packets); + let packet_batches = convert_from_old_verified(packet_batches); verified_sender // no_ver, anf, tx - .send(packets) + .send(packet_batches) .unwrap(); drop(verified_sender); @@ -1846,24 +1897,24 @@ mod tests { let tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 2, genesis_config.hash()); - let packets = to_packets_chunked(&[tx], 1); - let packets = packets + let packet_batches = to_packet_batches(&[tx], 1); + let packet_batches = packet_batches .into_iter() - .map(|packets| (packets, vec![1u8])) + .map(|batch| (batch, vec![1u8])) 
.collect(); - let packets = convert_from_old_verified(packets); - verified_sender.send(packets).unwrap(); + let packet_batches = convert_from_old_verified(packet_batches); + verified_sender.send(packet_batches).unwrap(); // Process a second batch that uses the same from account, so conflicts with above TX let tx = system_transaction::transfer(&mint_keypair, &alice.pubkey(), 1, genesis_config.hash()); - let packets = to_packets_chunked(&[tx], 1); - let packets = packets + let packet_batches = to_packet_batches(&[tx], 1); + let packet_batches = packet_batches .into_iter() - .map(|packets| (packets, vec![1u8])) + .map(|batch| (batch, vec![1u8])) .collect(); - let packets = convert_from_old_verified(packets); - verified_sender.send(packets).unwrap(); + let packet_batches = convert_from_old_verified(packet_batches); + verified_sender.send(packet_batches).unwrap(); let (vote_sender, vote_receiver) = unbounded(); let (tpu_vote_sender, tpu_vote_receiver) = unbounded(); @@ -1982,19 +2033,16 @@ mod tests { system_transaction::transfer(&keypair2, &pubkey2, 1, genesis_config.hash()), ]); - let mut results = vec![(Ok(()), None), (Ok(()), None)]; + let mut results = vec![new_execution_result(Ok(())); 2]; let _ = BankingStage::record_transactions(bank.slot(), &txs, &results, &recorder); let (_bank, (entry, _tick_height)) = entry_receiver.recv().unwrap(); assert_eq!(entry.transactions.len(), txs.len()); // InstructionErrors should still be recorded - results[0] = ( - Err(TransactionError::InstructionError( - 1, - SystemError::ResultWithNegativeLamports.into(), - )), - None, - ); + results[0] = new_execution_result(Err(TransactionError::InstructionError( + 1, + SystemError::ResultWithNegativeLamports.into(), + ))); let (res, retryable) = BankingStage::record_transactions(bank.slot(), &txs, &results, &recorder); res.unwrap(); @@ -2003,7 +2051,7 @@ mod tests { assert_eq!(entry.transactions.len(), txs.len()); // Other TransactionErrors should not be recorded - results[0] = 
(Err(TransactionError::AccountNotFound), None); + results[0] = TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound); let (res, retryable) = BankingStage::record_transactions(bank.slot(), &txs, &results, &recorder); res.unwrap(); @@ -2381,9 +2429,9 @@ mod tests { fn test_filter_valid_packets() { solana_logger::setup(); - let mut all_packets = (0..16) + let mut packet_batches = (0..16) .map(|packets_id| { - let packets = Packets::new( + let packet_batch = PacketBatch::new( (0..32) .map(|packet_id| { let mut p = Packet::default(); @@ -2395,11 +2443,11 @@ mod tests { let valid_indexes = (0..32) .filter_map(|x| if x % 2 != 0 { Some(x as usize) } else { None }) .collect_vec(); - (packets, valid_indexes, false) + (packet_batch, valid_indexes, false) }) .collect_vec(); - let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter()); + let result = BankingStage::filter_valid_packets_for_forwarding(packet_batches.iter()); assert_eq!(result.len(), 256); @@ -2413,8 +2461,8 @@ mod tests { }) .collect_vec(); - all_packets[0].2 = true; - let result = BankingStage::filter_valid_packets_for_forwarding(all_packets.iter()); + packet_batches[0].2 = true; + let result = BankingStage::filter_valid_packets_for_forwarding(packet_batches.iter()); assert_eq!(result.len(), 240); } @@ -2666,12 +2714,15 @@ mod tests { setup_conflicting_transactions(&ledger_path); let recorder = poh_recorder.lock().unwrap().recorder(); let num_conflicting_transactions = transactions.len(); - let mut packets_vec = to_packets_chunked(&transactions, num_conflicting_transactions); - assert_eq!(packets_vec.len(), 1); - assert_eq!(packets_vec[0].packets.len(), num_conflicting_transactions); - let all_packets = packets_vec.pop().unwrap(); - let mut buffered_packets: UnprocessedPackets = vec![( - all_packets, + let mut packet_batches = to_packet_batches(&transactions, num_conflicting_transactions); + assert_eq!(packet_batches.len(), 1); + assert_eq!( + 
packet_batches[0].packets.len(), + num_conflicting_transactions + ); + let packet_batch = packet_batches.pop().unwrap(); + let mut buffered_packet_batches: UnprocessedPacketBatches = vec![( + packet_batch, (0..num_conflicting_transactions).into_iter().collect(), false, )] @@ -2687,7 +2738,7 @@ mod tests { &Pubkey::default(), max_tx_processing_ns, &poh_recorder, - &mut buffered_packets, + &mut buffered_packet_batches, None, &gossip_vote_sender, None::>, @@ -2695,7 +2746,10 @@ mod tests { &recorder, &Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))), ); - assert_eq!(buffered_packets[0].1.len(), num_conflicting_transactions); + assert_eq!( + buffered_packet_batches[0].1.len(), + num_conflicting_transactions + ); // When the poh recorder has a bank, should process all non conflicting buffered packets. // Processes one packet per iteration of the loop for num_expected_unprocessed in (0..num_conflicting_transactions).rev() { @@ -2704,7 +2758,7 @@ mod tests { &Pubkey::default(), max_tx_processing_ns, &poh_recorder, - &mut buffered_packets, + &mut buffered_packet_batches, None, &gossip_vote_sender, None::>, @@ -2713,9 +2767,9 @@ mod tests { &Arc::new(QosService::new(Arc::new(RwLock::new(CostModel::default())))), ); if num_expected_unprocessed == 0 { - assert!(buffered_packets.is_empty()) + assert!(buffered_packet_batches.is_empty()) } else { - assert_eq!(buffered_packets[0].1.len(), num_expected_unprocessed); + assert_eq!(buffered_packet_batches[0].1.len(), num_expected_unprocessed); } } poh_recorder @@ -2735,12 +2789,12 @@ mod tests { let (transactions, bank, poh_recorder, _entry_receiver, poh_simulator) = setup_conflicting_transactions(&ledger_path); let num_conflicting_transactions = transactions.len(); - let packets_vec = to_packets_chunked(&transactions, 1); - assert_eq!(packets_vec.len(), num_conflicting_transactions); - for single_packets in &packets_vec { - assert_eq!(single_packets.packets.len(), 1); + let packet_batches = 
to_packet_batches(&transactions, 1); + assert_eq!(packet_batches.len(), num_conflicting_transactions); + for single_packet_batch in &packet_batches { + assert_eq!(single_packet_batch.packets.len(), 1); } - let mut buffered_packets: UnprocessedPackets = packets_vec + let mut buffered_packet_batches: UnprocessedPacketBatches = packet_batches .clone() .into_iter() .map(|single_packets| (single_packets, vec![0], false)) @@ -2754,8 +2808,8 @@ mod tests { continue_receiver.recv().unwrap(); }); // When the poh recorder has a bank, it should process all non conflicting buffered packets. - // Because each conflicting transaction is in it's own `Packet` within `packets_vec`, then - // each iteration of this loop will process one element of `packets_vec`per iteration of the + // Because each conflicting transaction is in it's own `Packet` within a `PacketBatch`, then + // each iteration of this loop will process one element of the batch per iteration of the // loop. let interrupted_iteration = 1; poh_recorder.lock().unwrap().set_bank(&bank); @@ -2770,7 +2824,7 @@ mod tests { &Pubkey::default(), std::u128::MAX, &poh_recorder_, - &mut buffered_packets, + &mut buffered_packet_batches, None, &gossip_vote_sender, test_fn, @@ -2782,13 +2836,13 @@ mod tests { // Check everything is correct. 
All indexes after `interrupted_iteration` // should still be unprocessed assert_eq!( - buffered_packets.len(), - packets_vec[interrupted_iteration + 1..].len() + buffered_packet_batches.len(), + packet_batches[interrupted_iteration + 1..].len() ); for ((remaining_unprocessed_packet, _, _forwarded), original_packet) in - buffered_packets + buffered_packet_batches .iter() - .zip(&packets_vec[interrupted_iteration + 1..]) + .zip(&packet_batches[interrupted_iteration + 1..]) { assert_eq!( remaining_unprocessed_packet.packets[0], @@ -2823,17 +2877,16 @@ mod tests { #[test] fn test_forwarder_budget() { solana_logger::setup(); - // Create `Packets` with 1 unprocessed element - let single_element_packets = Packets::new(vec![Packet::default()]); - let mut unprocessed_packets: UnprocessedPackets = - vec![(single_element_packets, vec![0], false)] - .into_iter() - .collect(); - - let cluster_info = new_test_cluster_info(Node::new_localhost().info); + // Create `PacketBatch` with 1 unprocessed packet + let packet = Packet::from_data(None, &[0]).unwrap(); + let single_packet_batch = PacketBatch::new(vec![packet]); let genesis_config_info = create_slow_genesis_config(10_000); - let GenesisConfigInfo { genesis_config, .. } = &genesis_config_info; + let GenesisConfigInfo { + genesis_config, + validator_pubkey, + .. 
+ } = &genesis_config_info; let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config)); let ledger_path = get_tmp_ledger_path!(); @@ -2852,17 +2905,153 @@ mod tests { let (exit, poh_recorder, poh_service, _entry_receiver) = create_test_recorder(&bank, &blockstore, Some(poh_config)); - let socket = UdpSocket::bind("0.0.0.0:0").unwrap(); - let data_budget = DataBudget::default(); - BankingStage::handle_forwarding( - &ForwardOption::ForwardTransaction, - &cluster_info, - &mut unprocessed_packets, - &poh_recorder, - &socket, - false, - &data_budget, + let local_node = Node::new_localhost_with_pubkey(validator_pubkey); + let cluster_info = new_test_cluster_info(local_node.info); + let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); + let recv_socket = &local_node.sockets.tpu_forwards[0]; + + let test_cases = vec![ + ("budget-restricted", DataBudget::restricted(), 0), + ("budget-available", DataBudget::default(), 1), + ]; + + for (name, data_budget, expected_num_forwarded) in test_cases { + let mut unprocessed_packet_batches: UnprocessedPacketBatches = + vec![(single_packet_batch.clone(), vec![0], false)] + .into_iter() + .collect(); + BankingStage::handle_forwarding( + &ForwardOption::ForwardTransaction, + &cluster_info, + &mut unprocessed_packet_batches, + &poh_recorder, + &send_socket, + true, + &data_budget, + ); + + recv_socket + .set_nonblocking(expected_num_forwarded == 0) + .unwrap(); + + let mut packets = vec![Packet::default(); 2]; + let num_received = recv_mmsg(recv_socket, &mut packets[..]).unwrap_or_default(); + assert_eq!(num_received, expected_num_forwarded, "{}", name); + } + + exit.store(true, Ordering::Relaxed); + poh_service.join().unwrap(); + } + Blockstore::destroy(&ledger_path).unwrap(); + } + + #[test] + fn test_handle_forwarding() { + solana_logger::setup(); + + const FWD_PACKET: u8 = 1; + let forwarded_packet = { + let mut packet = Packet::from_data(None, &[FWD_PACKET]).unwrap(); + packet.meta.flags |= 
PacketFlags::FORWARDED; + packet + }; + + const NORMAL_PACKET: u8 = 2; + let normal_packet = Packet::from_data(None, &[NORMAL_PACKET]).unwrap(); + + let packet_batch = PacketBatch::new(vec![forwarded_packet, normal_packet]); + let mut unprocessed_packet_batches: UnprocessedPacketBatches = + vec![(packet_batch, vec![0, 1], false)] + .into_iter() + .collect(); + + let genesis_config_info = create_slow_genesis_config(10_000); + let GenesisConfigInfo { + genesis_config, + validator_pubkey, + .. + } = &genesis_config_info; + let bank = Arc::new(Bank::new_no_wallclock_throttle_for_tests(genesis_config)); + let ledger_path = get_tmp_ledger_path!(); + { + let blockstore = Arc::new( + Blockstore::open(&ledger_path) + .expect("Expected to be able to open database ledger"), ); + let poh_config = PohConfig { + // limit tick count to avoid clearing working_bank at + // PohRecord then PohRecorderError(MaxHeightReached) at BankingStage + target_tick_count: Some(bank.max_tick_height() - 1), + ..PohConfig::default() + }; + + let (exit, poh_recorder, poh_service, _entry_receiver) = + create_test_recorder(&bank, &blockstore, Some(poh_config)); + + let local_node = Node::new_localhost_with_pubkey(validator_pubkey); + let cluster_info = new_test_cluster_info(local_node.info); + let send_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); + let recv_socket = &local_node.sockets.tpu_forwards[0]; + + let test_cases = vec![ + ("not-forward", ForwardOption::NotForward, true, vec![], 2), + ( + "fwd-normal", + ForwardOption::ForwardTransaction, + true, + vec![NORMAL_PACKET], + 2, + ), + ( + "fwd-no-op", + ForwardOption::ForwardTransaction, + true, + vec![], + 2, + ), + ( + "fwd-no-hold", + ForwardOption::ForwardTransaction, + false, + vec![], + 0, + ), + ]; + + for (name, forward_option, hold, expected_ids, expected_num_unprocessed) in test_cases { + BankingStage::handle_forwarding( + &forward_option, + &cluster_info, + &mut unprocessed_packet_batches, + &poh_recorder, + &send_socket, + hold, + 
&DataBudget::default(), + ); + + recv_socket + .set_nonblocking(expected_ids.is_empty()) + .unwrap(); + + let mut packets = vec![Packet::default(); 2]; + let num_received = recv_mmsg(recv_socket, &mut packets[..]).unwrap_or_default(); + assert_eq!(num_received, expected_ids.len(), "{}", name); + for (i, expected_id) in expected_ids.iter().enumerate() { + assert_eq!(packets[i].meta.size, 1); + assert_eq!(packets[i].data[0], *expected_id, "{}", name); + } + + let num_unprocessed_packets: usize = unprocessed_packet_batches + .iter() + .map(|(b, ..)| b.packets.len()) + .sum(); + assert_eq!( + num_unprocessed_packets, expected_num_unprocessed, + "{}", + name + ); + } + exit.store(true, Ordering::Relaxed); poh_service.join().unwrap(); } @@ -2872,14 +3061,16 @@ mod tests { #[test] fn test_push_unprocessed_batch_limit() { solana_logger::setup(); - // Create `Packets` with 2 unprocessed elements - let new_packets = Packets::new(vec![Packet::default(); 2]); - let mut unprocessed_packets: UnprocessedPackets = - vec![(new_packets, vec![0, 1], false)].into_iter().collect(); + // Create `PacketBatch` with 2 unprocessed packets + let new_packet_batch = PacketBatch::new(vec![Packet::default(); 2]); + let mut unprocessed_packets: UnprocessedPacketBatches = + vec![(new_packet_batch, vec![0, 1], false)] + .into_iter() + .collect(); // Set the limit to 2 let batch_limit = 2; - // Create some new unprocessed packets - let new_packets = Packets::new(vec![Packet::default()]); + // Create new unprocessed packets and add to a batch + let new_packet_batch = PacketBatch::new(vec![Packet::default()]); let packet_indexes = vec![]; let duplicates = Arc::new(Mutex::new(( @@ -2894,7 +3085,7 @@ mod tests { // packets are not added to the unprocessed queue BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets.clone(), + new_packet_batch.clone(), packet_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2913,7 +3104,7 @@ mod tests { let packet_indexes = 
vec![0]; BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets, + new_packet_batch, packet_indexes.clone(), &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2929,7 +3120,7 @@ mod tests { // Because we've reached the batch limit, old unprocessed packets are // dropped and the new one is appended to the end - let new_packets = Packets::new(vec![Packet::from_data( + let new_packet_batch = PacketBatch::new(vec![Packet::from_data( Some(&SocketAddr::from(([127, 0, 0, 1], 8001))), 42, ) @@ -2937,7 +3128,7 @@ mod tests { assert_eq!(unprocessed_packets.len(), batch_limit); BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets.clone(), + new_packet_batch.clone(), packet_indexes.clone(), &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2947,7 +3138,10 @@ mod tests { &banking_stage_stats, ); assert_eq!(unprocessed_packets.len(), 2); - assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]); + assert_eq!( + unprocessed_packets[1].0.packets[0], + new_packet_batch.packets[0] + ); assert_eq!(dropped_packet_batches_count, 1); assert_eq!(dropped_packets_count, 2); assert_eq!(newly_buffered_packets_count, 2); @@ -2955,7 +3149,7 @@ mod tests { // Check duplicates are dropped (newly buffered shouldn't change) BankingStage::push_unprocessed( &mut unprocessed_packets, - new_packets.clone(), + new_packet_batch.clone(), packet_indexes, &mut dropped_packet_batches_count, &mut dropped_packets_count, @@ -2965,7 +3159,10 @@ mod tests { &banking_stage_stats, ); assert_eq!(unprocessed_packets.len(), 2); - assert_eq!(unprocessed_packets[1].0.packets[0], new_packets.packets[0]); + assert_eq!( + unprocessed_packets[1].0.packets[0], + new_packet_batch.packets[0] + ); assert_eq!(dropped_packet_batches_count, 1); assert_eq!(dropped_packets_count, 2); assert_eq!(newly_buffered_packets_count, 2); @@ -2988,19 +3185,19 @@ mod tests { fn make_test_packets( transactions: Vec, vote_indexes: Vec, - ) -> (Packets, Vec) { + 
) -> (PacketBatch, Vec) { let capacity = transactions.len(); - let mut packets = Packets::with_capacity(capacity); + let mut packet_batch = PacketBatch::with_capacity(capacity); let mut packet_indexes = Vec::with_capacity(capacity); - packets.packets.resize(capacity, Packet::default()); + packet_batch.packets.resize(capacity, Packet::default()); for (index, tx) in transactions.iter().enumerate() { - Packet::populate_packet(&mut packets.packets[index], None, tx).ok(); + Packet::populate_packet(&mut packet_batch.packets[index], None, tx).ok(); packet_indexes.push(index); } for index in vote_indexes.iter() { - packets.packets[*index].meta.is_simple_vote_tx = true; + packet_batch.packets[*index].meta.flags |= PacketFlags::SIMPLE_VOTE_TX; } - (packets, packet_indexes) + (packet_batch, packet_indexes) } #[test] @@ -3022,12 +3219,12 @@ mod tests { // packets with no votes { let vote_indexes = vec![]; - let (packets, packet_indexes) = + let (packet_batch, packet_indexes) = make_test_packets(vec![transfer_tx.clone(), transfer_tx.clone()], vote_indexes); let mut votes_only = false; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3037,7 +3234,7 @@ mod tests { votes_only = true; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3049,14 +3246,14 @@ mod tests { // packets with some votes { let vote_indexes = vec![0, 2]; - let (packets, packet_indexes) = make_test_packets( + let (packet_batch, packet_indexes) = make_test_packets( vec![vote_tx.clone(), transfer_tx, vote_tx.clone()], vote_indexes, ); let mut votes_only = false; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3066,7 +3263,7 @@ mod tests { votes_only = true; let (txs, 
tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3078,14 +3275,14 @@ mod tests { // packets with all votes { let vote_indexes = vec![0, 1, 2]; - let (packets, packet_indexes) = make_test_packets( + let (packet_batch, packet_indexes) = make_test_packets( vec![vote_tx.clone(), vote_tx.clone(), vote_tx], vote_indexes, ); let mut votes_only = false; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, @@ -3095,7 +3292,7 @@ mod tests { votes_only = true; let (txs, tx_packet_index) = BankingStage::transactions_from_packets( - &packets, + &packet_batch, &packet_indexes, &Arc::new(FeatureSet::default()), votes_only, diff --git a/core/src/broadcast_stage.rs b/core/src/broadcast_stage.rs index 388f3e6168acc0..9fba19ba3390bc 100644 --- a/core/src/broadcast_stage.rs +++ b/core/src/broadcast_stage.rs @@ -496,6 +496,7 @@ pub mod test { &keypair, &data_shreds[0..], true, // is_last_in_slot + 0, // next_code_index &mut ProcessShredsStats::default(), ) .unwrap(); diff --git a/core/src/broadcast_stage/broadcast_duplicates_run.rs b/core/src/broadcast_stage/broadcast_duplicates_run.rs index 5964cdb606c895..ea39323f964655 100644 --- a/core/src/broadcast_stage/broadcast_duplicates_run.rs +++ b/core/src/broadcast_stage/broadcast_duplicates_run.rs @@ -28,6 +28,7 @@ pub(super) struct BroadcastDuplicatesRun { config: BroadcastDuplicatesConfig, current_slot: Slot, next_shred_index: u32, + next_code_index: u32, shred_version: u16, recent_blockhash: Option, prev_entry_hash: Option, @@ -46,6 +47,7 @@ impl BroadcastDuplicatesRun { Self { config, next_shred_index: u32::MAX, + next_code_index: 0, shred_version, current_slot: 0, recent_blockhash: None, @@ -74,6 +76,7 @@ impl BroadcastRun for BroadcastDuplicatesRun { if bank.slot() != self.current_slot { self.next_shred_index = 0; + 
self.next_code_index = 0; self.current_slot = bank.slot(); self.prev_entry_hash = None; self.num_slots_broadcasted += 1; @@ -154,22 +157,26 @@ impl BroadcastRun for BroadcastDuplicatesRun { ) .expect("Expected to create a new shredder"); - let (data_shreds, _, _) = shredder.entries_to_shreds( + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), self.next_shred_index, + self.next_code_index, ); self.next_shred_index += data_shreds.len() as u32; + if let Some(index) = coding_shreds.iter().map(Shred::index).max() { + self.next_code_index = index + 1; + } let last_shreds = last_entries.map(|(original_last_entry, duplicate_extra_last_entries)| { - let (original_last_data_shred, _, _) = - shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index); + let (original_last_data_shred, _) = + shredder.entries_to_shreds(keypair, &[original_last_entry], true, self.next_shred_index, self.next_code_index); - let (partition_last_data_shred, _, _) = + let (partition_last_data_shred, _) = // Don't mark the last shred as last so that validators won't know that // they've gotten all the shreds, and will continue trying to repair - shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index); + shredder.entries_to_shreds(keypair, &duplicate_extra_last_entries, true, self.next_shred_index, self.next_code_index); let sigs: Vec<_> = partition_last_data_shred.iter().map(|s| (s.signature(), s.index())).collect(); info!( diff --git a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs index f430da9b0fada8..a0bf77153a1cd0 100644 --- a/core/src/broadcast_stage/broadcast_fake_shreds_run.rs +++ b/core/src/broadcast_stage/broadcast_fake_shreds_run.rs @@ -10,6 +10,7 @@ pub(super) struct BroadcastFakeShredsRun { last_blockhash: Hash, partition: usize, 
shred_version: u16, + next_code_index: u32, } impl BroadcastFakeShredsRun { @@ -18,6 +19,7 @@ impl BroadcastFakeShredsRun { last_blockhash: Hash::default(), partition, shred_version, + next_code_index: 0, } } } @@ -52,11 +54,12 @@ impl BroadcastRun for BroadcastFakeShredsRun { ) .expect("Expected to create a new shredder"); - let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds( + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( keypair, &receive_results.entries, last_tick_height == bank.max_tick_height(), next_shred_index, + self.next_code_index, ); // If the last blockhash is default, a new block is being created @@ -69,13 +72,23 @@ impl BroadcastRun for BroadcastFakeShredsRun { .map(|_| Entry::new(&self.last_blockhash, 0, vec![])) .collect(); - let (fake_data_shreds, fake_coding_shreds, _) = shredder.entries_to_shreds( + let (fake_data_shreds, fake_coding_shreds) = shredder.entries_to_shreds( keypair, &fake_entries, last_tick_height == bank.max_tick_height(), next_shred_index, + self.next_code_index, ); + if let Some(index) = coding_shreds + .iter() + .chain(&fake_coding_shreds) + .map(Shred::index) + .max() + { + self.next_code_index = index + 1; + } + // If it's the last tick, reset the last block hash to default // this will cause next run to grab last bank's blockhash if last_tick_height == bank.max_tick_height() { diff --git a/core/src/broadcast_stage/broadcast_utils.rs b/core/src/broadcast_stage/broadcast_utils.rs index 0e76de935b2b49..902b5672d7e22c 100644 --- a/core/src/broadcast_stage/broadcast_utils.rs +++ b/core/src/broadcast_stage/broadcast_utils.rs @@ -21,6 +21,7 @@ pub(super) struct ReceiveResults { #[derive(Clone)] pub struct UnfinishedSlotInfo { pub next_shred_index: u32, + pub(crate) next_code_index: u32, pub slot: Slot, pub parent: Slot, // Data shreds buffered to make a batch of size diff --git a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs 
b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs index 5c74e1e56d95ea..5a65653d608628 100644 --- a/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs +++ b/core/src/broadcast_stage/fail_entry_verification_broadcast_run.rs @@ -15,6 +15,7 @@ pub(super) struct FailEntryVerificationBroadcastRun { good_shreds: Vec, current_slot: Slot, next_shred_index: u32, + next_code_index: u32, cluster_nodes_cache: Arc>, } @@ -29,6 +30,7 @@ impl FailEntryVerificationBroadcastRun { good_shreds: vec![], current_slot: 0, next_shred_index: 0, + next_code_index: 0, cluster_nodes_cache, } } @@ -50,6 +52,7 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { if bank.slot() != self.current_slot { self.next_shred_index = 0; + self.next_code_index = 0; self.current_slot = bank.slot(); } @@ -83,22 +86,26 @@ impl BroadcastRun for FailEntryVerificationBroadcastRun { ) .expect("Expected to create a new shredder"); - let (data_shreds, _, _) = shredder.entries_to_shreds( + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( keypair, &receive_results.entries, last_tick_height == bank.max_tick_height() && last_entries.is_none(), self.next_shred_index, + self.next_code_index, ); self.next_shred_index += data_shreds.len() as u32; + if let Some(index) = coding_shreds.iter().map(Shred::index).max() { + self.next_code_index = index + 1; + } let last_shreds = last_entries.map(|(good_last_entry, bad_last_entry)| { - let (good_last_data_shred, _, _) = - shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index); + let (good_last_data_shred, _) = + shredder.entries_to_shreds(keypair, &[good_last_entry], true, self.next_shred_index, self.next_code_index); - let (bad_last_data_shred, _, _) = + let (bad_last_data_shred, _) = // Don't mark the last shred as last so that validators won't know that // they've gotten all the shreds, and will continue trying to repair - shredder.entries_to_shreds(keypair, &[bad_last_entry], false, 
self.next_shred_index); + shredder.entries_to_shreds(keypair, &[bad_last_entry], false, self.next_shred_index, self.next_code_index); self.next_shred_index += 1; (good_last_data_shred, bad_last_data_shred) diff --git a/core/src/broadcast_stage/standard_broadcast_run.rs b/core/src/broadcast_stage/standard_broadcast_run.rs index 7a0dbdfa560f80..1788b19e863be4 100644 --- a/core/src/broadcast_stage/standard_broadcast_run.rs +++ b/core/src/broadcast_stage/standard_broadcast_run.rs @@ -119,17 +119,16 @@ impl StandardBroadcastRun { None => (0, 0), }, }; - let (data_shreds, next_shred_index) = - Shredder::new(slot, parent_slot, reference_tick, self.shred_version) - .unwrap() - .entries_to_data_shreds( - keypair, - entries, - is_slot_end, - next_shred_index, - fec_set_offset, - process_stats, - ); + let data_shreds = Shredder::new(slot, parent_slot, reference_tick, self.shred_version) + .unwrap() + .entries_to_data_shreds( + keypair, + entries, + is_slot_end, + next_shred_index, + fec_set_offset, + process_stats, + ); let mut data_shreds_buffer = match &mut self.unfinished_slot { Some(state) => { assert_eq!(state.slot, slot); @@ -138,8 +137,17 @@ impl StandardBroadcastRun { None => Vec::default(), }; data_shreds_buffer.extend(data_shreds.clone()); + let next_shred_index = match data_shreds.iter().map(Shred::index).max() { + Some(index) => index + 1, + None => next_shred_index, + }; + let next_code_index = match &self.unfinished_slot { + Some(state) => state.next_code_index, + None => 0, + }; self.unfinished_slot = Some(UnfinishedSlotInfo { next_shred_index, + next_code_index, slot, parent: parent_slot, data_shreds_buffer, @@ -446,23 +454,40 @@ fn make_coding_shreds( is_slot_end: bool, stats: &mut ProcessShredsStats, ) -> Vec { - let data_shreds = match unfinished_slot { - None => Vec::default(), - Some(unfinished_slot) => { - let size = unfinished_slot.data_shreds_buffer.len(); - // Consume a multiple of 32, unless this is the slot end. 
- let offset = if is_slot_end { - 0 - } else { - size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize - }; - unfinished_slot - .data_shreds_buffer - .drain(0..size - offset) - .collect() - } + let unfinished_slot = match unfinished_slot { + None => return Vec::default(), + Some(state) => state, }; - Shredder::data_shreds_to_coding_shreds(keypair, &data_shreds, is_slot_end, stats).unwrap() + let data_shreds: Vec<_> = { + let size = unfinished_slot.data_shreds_buffer.len(); + // Consume a multiple of 32, unless this is the slot end. + let offset = if is_slot_end { + 0 + } else { + size % MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + }; + unfinished_slot + .data_shreds_buffer + .drain(0..size - offset) + .collect() + }; + let shreds = Shredder::data_shreds_to_coding_shreds( + keypair, + &data_shreds, + is_slot_end, + unfinished_slot.next_code_index, + stats, + ) + .unwrap(); + if let Some(index) = shreds + .iter() + .filter(|shred| shred.is_code()) + .map(Shred::index) + .max() + { + unfinished_slot.next_code_index = unfinished_slot.next_code_index.max(index + 1); + } + shreds } impl BroadcastRun for StandardBroadcastRun { @@ -579,6 +604,7 @@ mod test { let parent = 0; run.unfinished_slot = Some(UnfinishedSlotInfo { next_shred_index, + next_code_index: 17, slot, parent, data_shreds_buffer: Vec::default(), @@ -596,7 +622,7 @@ mod test { .expect("Expected a shred that signals an interrupt"); // Validate the shred - assert_eq!(shred.parent(), Some(parent)); + assert_eq!(shred.parent().unwrap(), parent); assert_eq!(shred.slot(), slot); assert_eq!(shred.index(), next_shred_index); assert!(shred.is_data()); diff --git a/core/src/cluster_info_vote_listener.rs b/core/src/cluster_info_vote_listener.rs index d6c20973d40aba..997f40d75786b7 100644 --- a/core/src/cluster_info_vote_listener.rs +++ b/core/src/cluster_info_vote_listener.rs @@ -13,7 +13,6 @@ use { unbounded, Receiver as CrossbeamReceiver, RecvTimeoutError, Select, Sender as CrossbeamSender, }, - itertools::izip, log::*, 
solana_gossip::{ cluster_info::{ClusterInfo, GOSSIP_SLEEP_MILLIS}, @@ -22,7 +21,7 @@ use { solana_ledger::blockstore::Blockstore, solana_measure::measure::Measure, solana_metrics::inc_new_counter_debug, - solana_perf::packet::{self, Packets}, + solana_perf::packet::{self, PacketBatch}, solana_poh::poh_recorder::PohRecorder, solana_rpc::{ optimistically_confirmed_bank_tracker::{BankNotification, BankNotificationSender}, @@ -32,12 +31,11 @@ use { bank::Bank, bank_forks::BankForks, commitment::VOTE_THRESHOLD_SIZE, - epoch_stakes::{EpochAuthorizedVoters, EpochStakes}, + epoch_stakes::EpochStakes, vote_sender_types::{ReplayVoteReceiver, ReplayedVote}, }, solana_sdk::{ - clock::{Epoch, Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, - epoch_schedule::EpochSchedule, + clock::{Slot, DEFAULT_MS_PER_SLOT, DEFAULT_TICKS_PER_SLOT}, hash::Hash, pubkey::Pubkey, signature::Signature, @@ -47,6 +45,7 @@ use { solana_vote_program::{self, vote_state::Vote, vote_transaction}, std::{ collections::{HashMap, HashSet}, + iter::repeat, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, RwLock, @@ -58,7 +57,6 @@ use { // Map from a vote account to the authorized voter for an epoch pub type ThresholdConfirmedSlots = Vec<(Slot, Hash)>; -pub type VotedHashUpdates = HashMap>; pub type VerifiedLabelVotePacketsSender = CrossbeamSender>; pub type VerifiedLabelVotePacketsReceiver = CrossbeamReceiver>; pub type VerifiedVoteTransactionsSender = CrossbeamSender>; @@ -85,14 +83,14 @@ pub struct SlotVoteTracker { } impl SlotVoteTracker { - pub fn get_voted_slot_updates(&mut self) -> Option> { + pub(crate) fn get_voted_slot_updates(&mut self) -> Option> { self.voted_slot_updates.take() } - pub fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker { + fn get_or_insert_optimistic_votes_tracker(&mut self, hash: Hash) -> &mut VoteStakeTracker { self.optimistic_votes_tracker.entry(hash).or_default() } - pub fn optimistic_votes_tracker(&self, hash: &Hash) -> 
Option<&VoteStakeTracker> { + pub(crate) fn optimistic_votes_tracker(&self, hash: &Hash) -> Option<&VoteStakeTracker> { self.optimistic_votes_tracker.get(hash) } } @@ -101,82 +99,29 @@ impl SlotVoteTracker { pub struct VoteTracker { // Map from a slot to a set of validators who have voted for that slot slot_vote_trackers: RwLock>>>, - // Don't track votes from people who are not staked, acts as a spam filter - epoch_authorized_voters: RwLock>>, - leader_schedule_epoch: RwLock, - current_epoch: RwLock, - epoch_schedule: EpochSchedule, } impl VoteTracker { - pub fn new(root_bank: &Bank) -> Self { - let current_epoch = root_bank.epoch(); - let vote_tracker = Self { - leader_schedule_epoch: RwLock::new(current_epoch), - current_epoch: RwLock::new(current_epoch), - epoch_schedule: *root_bank.epoch_schedule(), - ..VoteTracker::default() - }; + pub(crate) fn new(root_bank: &Bank) -> Self { + let vote_tracker = VoteTracker::default(); vote_tracker.progress_with_new_root_bank(root_bank); - assert_eq!( - *vote_tracker.leader_schedule_epoch.read().unwrap(), - root_bank.get_leader_schedule_epoch(root_bank.slot()) - ); - assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch,); vote_tracker } - pub fn get_or_insert_slot_tracker(&self, slot: Slot) -> Arc> { - let mut slot_tracker = self.slot_vote_trackers.read().unwrap().get(&slot).cloned(); - - if slot_tracker.is_none() { - let new_slot_tracker = Arc::new(RwLock::new(SlotVoteTracker { - voted: HashMap::new(), - optimistic_votes_tracker: HashMap::default(), - voted_slot_updates: None, - gossip_only_stake: 0, - })); - self.slot_vote_trackers - .write() - .unwrap() - .insert(slot, new_slot_tracker.clone()); - slot_tracker = Some(new_slot_tracker); + fn get_or_insert_slot_tracker(&self, slot: Slot) -> Arc> { + if let Some(slot_vote_tracker) = self.slot_vote_trackers.read().unwrap().get(&slot) { + return slot_vote_tracker.clone(); } - - slot_tracker.unwrap() + let mut slot_vote_trackers = 
self.slot_vote_trackers.write().unwrap(); + slot_vote_trackers.entry(slot).or_default().clone() } - pub fn get_slot_vote_tracker(&self, slot: Slot) -> Option>> { + pub(crate) fn get_slot_vote_tracker(&self, slot: Slot) -> Option>> { self.slot_vote_trackers.read().unwrap().get(&slot).cloned() } - pub fn get_authorized_voter(&self, pubkey: &Pubkey, slot: Slot) -> Option { - let epoch = self.epoch_schedule.get_epoch(slot); - self.epoch_authorized_voters - .read() - .unwrap() - .get(&epoch) - .map(|epoch_authorized_voters| epoch_authorized_voters.get(pubkey)) - .unwrap_or(None) - .cloned() - } - - pub fn vote_contains_authorized_voter( - vote_tx: &Transaction, - authorized_voter: &Pubkey, - ) -> bool { - let message = &vote_tx.message; - for (i, key) in message.account_keys.iter().enumerate() { - if message.is_signer(i) && key == authorized_voter { - return true; - } - } - - false - } - #[cfg(test)] - pub fn insert_vote(&self, slot: Slot, pubkey: Pubkey) { + pub(crate) fn insert_vote(&self, slot: Slot, pubkey: Pubkey) { let mut w_slot_vote_trackers = self.slot_vote_trackers.write().unwrap(); let slot_vote_tracker = w_slot_vote_trackers.entry(slot).or_default(); @@ -191,59 +136,16 @@ impl VoteTracker { } } - fn progress_leader_schedule_epoch(&self, root_bank: &Bank) { - // Update with any newly calculated epoch state about future epochs - let start_leader_schedule_epoch = *self.leader_schedule_epoch.read().unwrap(); - let mut greatest_leader_schedule_epoch = start_leader_schedule_epoch; - for leader_schedule_epoch in - start_leader_schedule_epoch..=root_bank.get_leader_schedule_epoch(root_bank.slot()) - { - let exists = self - .epoch_authorized_voters - .read() - .unwrap() - .contains_key(&leader_schedule_epoch); - if !exists { - let epoch_authorized_voters = root_bank - .epoch_stakes(leader_schedule_epoch) - .unwrap() - .epoch_authorized_voters() - .clone(); - self.epoch_authorized_voters - .write() - .unwrap() - .insert(leader_schedule_epoch, epoch_authorized_voters); 
- greatest_leader_schedule_epoch = leader_schedule_epoch; - } - } - - if greatest_leader_schedule_epoch != start_leader_schedule_epoch { - *self.leader_schedule_epoch.write().unwrap() = greatest_leader_schedule_epoch; - } - } - fn purge_stale_state(&self, root_bank: &Bank) { // Purge any outdated slot data let new_root = root_bank.slot(); - let root_epoch = root_bank.epoch(); self.slot_vote_trackers .write() .unwrap() .retain(|slot, _| *slot >= new_root); - - let current_epoch = *self.current_epoch.read().unwrap(); - if root_epoch != current_epoch { - // If root moved to a new epoch, purge outdated state - self.epoch_authorized_voters - .write() - .unwrap() - .retain(|epoch, _| *epoch >= root_epoch); - *self.current_epoch.write().unwrap() = root_epoch; - } } fn progress_with_new_root_bank(&self, root_bank: &Bank) { - self.progress_leader_schedule_epoch(root_bank); self.purge_stale_state(root_bank); } } @@ -294,10 +196,10 @@ pub struct ClusterInfoVoteListener { impl ClusterInfoVoteListener { #[allow(clippy::too_many_arguments)] pub fn new( - exit: &Arc, + exit: Arc, cluster_info: Arc, - verified_packets_sender: CrossbeamSender>, - poh_recorder: &Arc>, + verified_packets_sender: CrossbeamSender>, + poh_recorder: Arc>, vote_tracker: Arc, bank_forks: Arc>, subscriptions: Arc, @@ -308,25 +210,26 @@ impl ClusterInfoVoteListener { bank_notification_sender: Option, cluster_confirmed_slot_sender: GossipDuplicateConfirmedSlotsSender, ) -> Self { - let exit_ = exit.clone(); - let (verified_vote_label_packets_sender, verified_vote_label_packets_receiver) = unbounded(); let (verified_vote_transactions_sender, verified_vote_transactions_receiver) = unbounded(); - let listen_thread = Builder::new() - .name("solana-cluster_info_vote_listener".to_string()) - .spawn(move || { - let _ = Self::recv_loop( - exit_, - &cluster_info, - verified_vote_label_packets_sender, - verified_vote_transactions_sender, - ); - }) - .unwrap(); - + let listen_thread = { + let exit = exit.clone(); + let 
bank_forks = bank_forks.clone(); + Builder::new() + .name("solana-cluster_info_vote_listener".to_string()) + .spawn(move || { + let _ = Self::recv_loop( + exit, + &cluster_info, + &bank_forks, + verified_vote_label_packets_sender, + verified_vote_transactions_sender, + ); + }) + .unwrap() + }; let exit_ = exit.clone(); - let poh_recorder = poh_recorder.clone(); let bank_send_thread = Builder::new() .name("solana-cluster_info_bank_send".to_string()) .spawn(move || { @@ -339,12 +242,11 @@ impl ClusterInfoVoteListener { }) .unwrap(); - let exit_ = exit.clone(); let send_thread = Builder::new() .name("solana-cluster_info_process_votes".to_string()) .spawn(move || { let _ = Self::process_votes_loop( - exit_, + exit, verified_vote_transactions_receiver, vote_tracker, bank_forks, @@ -364,16 +266,14 @@ impl ClusterInfoVoteListener { } } - pub fn join(self) -> thread::Result<()> { - for thread_hdl in self.thread_hdls { - thread_hdl.join()?; - } - Ok(()) + pub(crate) fn join(self) -> thread::Result<()> { + self.thread_hdls.into_iter().try_for_each(JoinHandle::join) } fn recv_loop( exit: Arc, cluster_info: &ClusterInfo, + bank_forks: &RwLock, verified_vote_label_packets_sender: VerifiedLabelVotePacketsSender, verified_vote_transactions_sender: VerifiedVoteTransactionsSender, ) -> Result<()> { @@ -382,7 +282,7 @@ impl ClusterInfoVoteListener { let votes = cluster_info.get_votes(&mut cursor); inc_new_counter_debug!("cluster_info_vote_listener-recv_count", votes.len()); if !votes.is_empty() { - let (vote_txs, packets) = Self::verify_votes(votes); + let (vote_txs, packets) = Self::verify_votes(votes, bank_forks); verified_vote_transactions_sender.send(vote_txs)?; verified_vote_label_packets_sender.send(packets)?; } @@ -392,50 +292,52 @@ impl ClusterInfoVoteListener { } #[allow(clippy::type_complexity)] - fn verify_votes(votes: Vec) -> (Vec, Vec) { - let mut msgs = packet::to_packets_chunked(&votes, 1); + fn verify_votes( + votes: Vec, + bank_forks: &RwLock, + ) -> (Vec, Vec) { + 
let mut packet_batches = packet::to_packet_batches(&votes, 1); // Votes should already be filtered by this point. - let reject_non_vote = false; - sigverify::ed25519_verify_cpu(&mut msgs, reject_non_vote); - - let (vote_txs, vote_metadata) = izip!(votes.into_iter(), msgs,) - .filter_map(|(vote_tx, packet)| { - let (vote, vote_account_key) = vote_transaction::parse_vote_transaction(&vote_tx) - .and_then(|(vote_account_key, vote, _)| { - if vote.slots.is_empty() { - None - } else { - Some((vote, vote_account_key)) - } - })?; - - // to_packets_chunked() above split into 1 packet long chunks - assert_eq!(packet.packets.len(), 1); - if !packet.packets[0].meta.discard { - if let Some(signature) = vote_tx.signatures.first().cloned() { - return Some(( - vote_tx, - VerifiedVoteMetadata { - vote_account_key, - vote, - packet, - signature, - }, - )); - } + sigverify::ed25519_verify_cpu(&mut packet_batches, /*reject_non_vote=*/ false); + let root_bank = bank_forks.read().unwrap().root_bank(); + let epoch_schedule = root_bank.epoch_schedule(); + votes + .into_iter() + .zip(packet_batches) + .filter(|(_, packet_batch)| { + // to_packet_batches() above splits into 1 packet long batches + assert_eq!(packet_batch.packets.len(), 1); + !packet_batch.packets[0].meta.discard() + }) + .filter_map(|(tx, packet_batch)| { + let (vote_account_key, vote, _) = vote_transaction::parse_vote_transaction(&tx)?; + let slot = vote.last_voted_slot()?; + let epoch = epoch_schedule.get_epoch(slot); + let authorized_voter = root_bank + .epoch_stakes(epoch)? 
+ .epoch_authorized_voters() + .get(&vote_account_key)?; + let mut keys = tx.message.account_keys.iter().enumerate(); + if !keys.any(|(i, key)| tx.message.is_signer(i) && key == authorized_voter) { + return None; } - None + let verified_vote_metadata = VerifiedVoteMetadata { + vote_account_key, + vote, + packet_batch, + signature: *tx.signatures.first()?, + }; + Some((tx, verified_vote_metadata)) }) - .unzip(); - (vote_txs, vote_metadata) + .unzip() } fn bank_send_loop( exit: Arc, verified_vote_label_packets_receiver: VerifiedLabelVotePacketsReceiver, poh_recorder: Arc>, - verified_packets_sender: &CrossbeamSender>, + verified_packets_sender: &CrossbeamSender>, ) -> Result<()> { let mut verified_vote_packets = VerifiedVotePackets::default(); let mut time_since_lock = Instant::now(); @@ -457,7 +359,7 @@ impl ClusterInfoVoteListener { ) { match e { Error::CrossbeamRecvTimeout(RecvTimeoutError::Disconnected) - | Error::ReadyTimeout => (), + | Error::CrossbeamRecvTimeout(RecvTimeoutError::Timeout) => (), _ => { error!("thread {:?} error {:?}", thread::current().name(), e); } @@ -483,7 +385,7 @@ impl ClusterInfoVoteListener { fn check_for_leader_bank_and_send_votes( bank_vote_sender_state_option: &mut Option, current_working_bank: Arc, - verified_packets_sender: &CrossbeamSender>, + verified_packets_sender: &CrossbeamSender>, verified_vote_packets: &VerifiedVotePackets, ) -> Result<()> { // We will take this lock at most once every `BANK_SEND_VOTES_LOOP_SLEEP_MS` @@ -555,7 +457,7 @@ impl ClusterInfoVoteListener { return Ok(()); } - let root_bank = bank_forks.read().unwrap().root_bank().clone(); + let root_bank = bank_forks.read().unwrap().root_bank(); if last_process_root.elapsed().as_millis() > DEFAULT_MS_PER_SLOT as u128 { let unrooted_optimistic_slots = confirmation_verifier .verify_for_unrooted_optimistic_slots(&root_bank, &blockstore); @@ -786,39 +688,6 @@ impl ClusterInfoVoteListener { } } - fn filter_gossip_votes( - vote_tracker: &VoteTracker, - vote_pubkey: 
&Pubkey, - vote: &Vote, - gossip_tx: &Transaction, - ) -> bool { - if vote.slots.is_empty() { - return false; - } - let last_vote_slot = vote.slots.last().unwrap(); - // Votes from gossip need to be verified as they have not been - // verified by the replay pipeline. Determine the authorized voter - // based on the last vote slot. This will drop votes from authorized - // voters trying to make votes for slots earlier than the epoch for - // which they are authorized - let actual_authorized_voter = - vote_tracker.get_authorized_voter(vote_pubkey, *last_vote_slot); - - if actual_authorized_voter.is_none() { - return false; - } - - // Voting without the correct authorized pubkey, dump the vote - if !VoteTracker::vote_contains_authorized_voter( - gossip_tx, - &actual_authorized_voter.unwrap(), - ) { - return false; - } - - true - } - fn filter_and_confirm_with_new_votes( vote_tracker: &VoteTracker, gossip_vote_txs: Vec, @@ -834,17 +703,12 @@ impl ClusterInfoVoteListener { let mut new_optimistic_confirmed_slots = vec![]; // Process votes from gossip and ReplayStage - for (is_gossip, (vote_pubkey, vote, _)) in gossip_vote_txs + let votes = gossip_vote_txs .iter() - .filter_map(|gossip_tx| { - vote_transaction::parse_vote_transaction(gossip_tx) - .filter(|(vote_pubkey, vote, _)| { - Self::filter_gossip_votes(vote_tracker, vote_pubkey, vote, gossip_tx) - }) - .map(|v| (true, v)) - }) - .chain(replayed_votes.into_iter().map(|v| (false, v))) - { + .filter_map(vote_transaction::parse_vote_transaction) + .zip(repeat(/*is_gossip:*/ true)) + .chain(replayed_votes.into_iter().zip(repeat(/*is_gossip:*/ false))); + for ((vote_pubkey, vote, _), is_gossip) in votes { Self::track_new_votes_and_notify_confirmations( vote, &vote_pubkey, @@ -960,7 +824,11 @@ mod tests { signature::{Keypair, Signature, Signer}, }, solana_vote_program::vote_state::Vote, - std::{collections::BTreeSet, sync::Arc}, + std::{ + collections::BTreeSet, + iter::repeat_with, + sync::{atomic::AtomicU64, Arc}, + }, 
}; #[test] @@ -983,76 +851,9 @@ mod tests { use bincode::serialized_size; info!("max vote size {}", serialized_size(&vote_tx).unwrap()); - let msgs = packet::to_packets_chunked(&[vote_tx], 1); // panics if won't fit + let packet_batches = packet::to_packet_batches(&[vote_tx], 1); // panics if won't fit - assert_eq!(msgs.len(), 1); - } - - fn run_vote_contains_authorized_voter(hash: Option) { - let node_keypair = Keypair::new(); - let vote_keypair = Keypair::new(); - let authorized_voter = Keypair::new(); - - let vote_tx = vote_transaction::new_vote_transaction( - vec![0], - Hash::default(), - Hash::default(), - &node_keypair, - &vote_keypair, - &authorized_voter, - hash, - ); - - // Check that the two signing keys pass the check - assert!(VoteTracker::vote_contains_authorized_voter( - &vote_tx, - &node_keypair.pubkey() - )); - - assert!(VoteTracker::vote_contains_authorized_voter( - &vote_tx, - &authorized_voter.pubkey() - )); - - // Non signing key shouldn't pass the check - assert!(!VoteTracker::vote_contains_authorized_voter( - &vote_tx, - &vote_keypair.pubkey() - )); - - // Set the authorized voter == vote keypair - let vote_tx = vote_transaction::new_vote_transaction( - vec![0], - Hash::default(), - Hash::default(), - &node_keypair, - &vote_keypair, - &vote_keypair, - hash, - ); - - // Check that the node_keypair and vote keypair pass the authorized voter check - assert!(VoteTracker::vote_contains_authorized_voter( - &vote_tx, - &node_keypair.pubkey() - )); - - assert!(VoteTracker::vote_contains_authorized_voter( - &vote_tx, - &vote_keypair.pubkey() - )); - - // The other keypair should not pass the check - assert!(!VoteTracker::vote_contains_authorized_voter( - &vote_tx, - &authorized_voter.pubkey() - )); - } - - #[test] - fn test_vote_contains_authorized_voter() { - run_vote_contains_authorized_voter(None); - run_vote_contains_authorized_voter(Some(Hash::default())); + assert_eq!(packet_batches.len(), 1); } #[test] @@ -1088,15 +889,11 @@ mod tests { 
.get_first_slot_in_epoch(current_epoch + 1), ); vote_tracker.progress_with_new_root_bank(&new_epoch_bank); - assert_eq!( - *vote_tracker.current_epoch.read().unwrap(), - current_epoch + 1 - ); } #[test] fn test_update_new_leader_schedule_epoch() { - let (vote_tracker, bank, _, _) = setup(); + let (_, bank, _, _) = setup(); // Check outdated slots are purged with new root let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot()); @@ -1114,25 +911,6 @@ mod tests { bank.get_leader_schedule_epoch(next_leader_schedule_computed), next_leader_schedule_epoch ); - let next_leader_schedule_bank = - Bank::new_from_parent(&bank, &Pubkey::default(), next_leader_schedule_computed); - vote_tracker.progress_leader_schedule_epoch(&next_leader_schedule_bank); - assert_eq!( - *vote_tracker.leader_schedule_epoch.read().unwrap(), - next_leader_schedule_epoch - ); - assert_eq!( - vote_tracker - .epoch_authorized_voters - .read() - .unwrap() - .get(&next_leader_schedule_epoch) - .unwrap(), - next_leader_schedule_bank - .epoch_stakes(next_leader_schedule_epoch) - .unwrap() - .epoch_authorized_voters() - ); } #[test] @@ -1574,59 +1352,6 @@ mod tests { run_test_process_votes3(Some(Hash::default())); } - #[test] - fn test_get_voters_by_epoch() { - // Create some voters at genesis - let (vote_tracker, bank, validator_voting_keypairs, _) = setup(); - let last_known_epoch = bank.get_leader_schedule_epoch(bank.slot()); - let last_known_slot = bank - .epoch_schedule() - .get_last_slot_in_epoch(last_known_epoch); - - // Check we can get the authorized voters - for keypairs in &validator_voting_keypairs { - assert!(vote_tracker - .get_authorized_voter(&keypairs.vote_keypair.pubkey(), last_known_slot) - .is_some()); - assert!(vote_tracker - .get_authorized_voter(&keypairs.vote_keypair.pubkey(), last_known_slot + 1) - .is_none()); - } - - // Create the set of relevant voters for the next epoch - let new_epoch = last_known_epoch + 1; - let first_slot_in_new_epoch = 
bank.epoch_schedule().get_first_slot_in_epoch(new_epoch); - let new_keypairs: Vec<_> = (0..10).map(|_| ValidatorVoteKeypairs::new_rand()).collect(); - let new_epoch_authorized_voters: HashMap<_, _> = new_keypairs - .iter() - .chain(validator_voting_keypairs[0..5].iter()) - .map(|keypair| (keypair.vote_keypair.pubkey(), keypair.vote_keypair.pubkey())) - .collect(); - - vote_tracker - .epoch_authorized_voters - .write() - .unwrap() - .insert(new_epoch, Arc::new(new_epoch_authorized_voters)); - - // These keypairs made it into the new epoch - for keypairs in new_keypairs - .iter() - .chain(validator_voting_keypairs[0..5].iter()) - { - assert!(vote_tracker - .get_authorized_voter(&keypairs.vote_keypair.pubkey(), first_slot_in_new_epoch) - .is_some()); - } - - // These keypairs were not refreshed in new epoch - for keypairs in validator_voting_keypairs[5..10].iter() { - assert!(vote_tracker - .get_authorized_voter(&keypairs.vote_keypair.pubkey(), first_slot_in_new_epoch) - .is_none()); - } - } - #[test] fn test_vote_tracker_references() { // Create some voters at genesis @@ -1646,8 +1371,10 @@ mod tests { let vote_tracker = VoteTracker::new(&bank); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, @@ -1690,17 +1417,6 @@ mod tests { // Setup next epoch let old_epoch = bank.get_leader_schedule_epoch(bank.slot()); let new_epoch = old_epoch + 1; - let new_epoch_vote_accounts: HashMap<_, _> = vec![( - validator0_keypairs.vote_keypair.pubkey(), - validator0_keypairs.vote_keypair.pubkey(), - )] - .into_iter() - .collect(); - vote_tracker - .epoch_authorized_voters - .write() - .unwrap() - .insert(new_epoch, 
Arc::new(new_epoch_vote_accounts)); // Test with votes across two epochs let first_slot_in_new_epoch = bank.epoch_schedule().get_first_slot_in_epoch(new_epoch); @@ -1765,36 +1481,15 @@ mod tests { let bank = bank_forks.read().unwrap().get(0).unwrap().clone(); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, )); - // Integrity Checks - let current_epoch = bank.epoch(); - let leader_schedule_epoch = bank.get_leader_schedule_epoch(bank.slot()); - - // Check the vote tracker has all the known epoch state on construction - for epoch in current_epoch..=leader_schedule_epoch { - assert_eq!( - vote_tracker - .epoch_authorized_voters - .read() - .unwrap() - .get(&epoch) - .unwrap(), - bank.epoch_stakes(epoch).unwrap().epoch_authorized_voters() - ); - } - - // Check the epoch state is correct - assert_eq!( - *vote_tracker.leader_schedule_epoch.read().unwrap(), - leader_schedule_epoch, - ); - assert_eq!(*vote_tracker.current_epoch.read().unwrap(), current_epoch); ( Arc::new(vote_tracker), bank, @@ -1806,8 +1501,11 @@ mod tests { #[test] fn test_verify_votes_empty() { solana_logger::setup(); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = RwLock::new(BankForks::new(bank)); let votes = vec![]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes); + let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks); assert!(vote_txs.is_empty()); assert!(packets.is_empty()); } @@ -1815,30 +1513,45 @@ mod tests { fn verify_packets_len(packets: &[VerifiedVoteMetadata], ref_value: usize) { let num_packets: usize = packets .iter() - .map(|vote_metadata| vote_metadata.packet.packets.len()) + .map(|vote_metadata| vote_metadata.packet_batch.packets.len()) .sum(); assert_eq!(num_packets, ref_value); } - fn test_vote_tx(hash: Option) -> Transaction { - let node_keypair = Keypair::new(); - let vote_keypair = Keypair::new(); - let auth_voter_keypair = Keypair::new(); + fn test_vote_tx( + validator_vote_keypairs: Option<&ValidatorVoteKeypairs>, + hash: Option, + ) -> Transaction { + let other = ValidatorVoteKeypairs::new_rand(); + let validator_vote_keypair = validator_vote_keypairs.unwrap_or(&other); + // TODO authorized_voter_keypair should be different from vote-keypair + // but that is what create_genesis_... currently generates. vote_transaction::new_vote_transaction( vec![0], Hash::default(), Hash::default(), - &node_keypair, - &vote_keypair, - &auth_voter_keypair, + &validator_vote_keypair.node_keypair, + &validator_vote_keypair.vote_keypair, + &validator_vote_keypair.vote_keypair, // authorized_voter_keypair hash, ) } fn run_test_verify_votes_1_pass(hash: Option) { - let vote_tx = test_vote_tx(hash); + let voting_keypairs: Vec<_> = repeat_with(ValidatorVoteKeypairs::new_rand) + .take(10) + .collect(); + let GenesisConfigInfo { genesis_config, .. 
} = + genesis_utils::create_genesis_config_with_vote_accounts( + 10_000, // mint_lamports + &voting_keypairs, + vec![100; voting_keypairs.len()], // stakes + ); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = RwLock::new(BankForks::new(bank)); + let vote_tx = test_vote_tx(voting_keypairs.first(), hash); let votes = vec![vote_tx]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes); + let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks); assert_eq!(vote_txs.len(), 1); verify_packets_len(&packets, 1); } @@ -1850,11 +1563,22 @@ mod tests { } fn run_test_bad_vote(hash: Option) { - let vote_tx = test_vote_tx(hash); + let voting_keypairs: Vec<_> = repeat_with(ValidatorVoteKeypairs::new_rand) + .take(10) + .collect(); + let GenesisConfigInfo { genesis_config, .. } = + genesis_utils::create_genesis_config_with_vote_accounts( + 10_000, // mint_lamports + &voting_keypairs, + vec![100; voting_keypairs.len()], // stakes + ); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = RwLock::new(BankForks::new(bank)); + let vote_tx = test_vote_tx(voting_keypairs.first(), hash); let mut bad_vote = vote_tx.clone(); bad_vote.signatures[0] = Signature::default(); let votes = vec![vote_tx.clone(), bad_vote, vote_tx]; - let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes); + let (vote_txs, packets) = ClusterInfoVoteListener::verify_votes(votes, &bank_forks); assert_eq!(vote_txs.len(), 2); verify_packets_len(&packets, 2); } diff --git a/core/src/commitment_service.rs b/core/src/commitment_service.rs index f918065d024724..dcdc769f7883b3 100644 --- a/core/src/commitment_service.rs +++ b/core/src/commitment_service.rs @@ -97,11 +97,8 @@ impl AggregateCommitmentService { return Ok(()); } - let mut aggregation_data = receiver.recv_timeout(Duration::from_secs(1))?; - - while let Ok(new_data) = receiver.try_recv() { - aggregation_data = new_data; - } + let aggregation_data = 
receiver.recv_timeout(Duration::from_secs(1))?; + let aggregation_data = receiver.try_iter().last().unwrap_or(aggregation_data); let ancestors = aggregation_data.bank.status_cache_ancestors(); if ancestors.is_empty() { @@ -506,11 +503,7 @@ mod tests { let validator_vote_keypairs = ValidatorVoteKeypairs::new_rand(); let validator_keypairs = vec![&validator_vote_keypairs]; - let GenesisConfigInfo { - genesis_config, - mint_keypair: _, - voting_keypair: _, - } = create_genesis_config_with_vote_accounts( + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, vec![100; 1], diff --git a/core/src/cost_update_service.rs b/core/src/cost_update_service.rs index 0ebee0c09c45ee..c21c78869cd83a 100644 --- a/core/src/cost_update_service.rs +++ b/core/src/cost_update_service.rs @@ -127,8 +127,10 @@ impl CostUpdateService { CostUpdate::FrozenBank { bank } => { bank.read_cost_tracker().unwrap().report_stats(bank.slot()); } - CostUpdate::ExecuteTiming { execute_timings } => { - dirty |= Self::update_cost_model(&cost_model, &execute_timings); + CostUpdate::ExecuteTiming { + mut execute_timings, + } => { + dirty |= Self::update_cost_model(&cost_model, &mut execute_timings); update_count += 1; } } @@ -151,16 +153,27 @@ impl CostUpdateService { } } - fn update_cost_model(cost_model: &RwLock, execute_timings: &ExecuteTimings) -> bool { + fn update_cost_model( + cost_model: &RwLock, + execute_timings: &mut ExecuteTimings, + ) -> bool { let mut dirty = false; { - let mut cost_model_mutable = cost_model.write().unwrap(); - for (program_id, timing) in &execute_timings.details.per_program_timings { - if timing.count < 1 { + for (program_id, program_timings) in &mut execute_timings.details.per_program_timings { + let current_estimated_program_cost = + cost_model.read().unwrap().find_instruction_cost(program_id); + program_timings.coalesce_error_timings(current_estimated_program_cost); + + if program_timings.count < 1 { 
continue; } - let units = timing.accumulated_units / timing.count as u64; - match cost_model_mutable.upsert_instruction_cost(program_id, units) { + + let units = program_timings.accumulated_units / program_timings.count as u64; + match cost_model + .write() + .unwrap() + .upsert_instruction_cost(program_id, units) + { Ok(c) => { debug!( "after replayed into bank, instruction {:?} has averaged cost {}", @@ -213,8 +226,8 @@ mod tests { #[test] fn test_update_cost_model_with_empty_execute_timings() { let cost_model = Arc::new(RwLock::new(CostModel::default())); - let empty_execute_timings = ExecuteTimings::default(); - CostUpdateService::update_cost_model(&cost_model, &empty_execute_timings); + let mut empty_execute_timings = ExecuteTimings::default(); + CostUpdateService::update_cost_model(&cost_model, &mut empty_execute_timings); assert_eq!( 0, @@ -238,6 +251,7 @@ mod tests { { let accumulated_us: u64 = 1000; let accumulated_units: u64 = 100; + let total_errored_units = 0; let count: u32 = 10; expected_cost = accumulated_units / count as u64; @@ -247,9 +261,11 @@ mod tests { accumulated_us, accumulated_units, count, + errored_txs_compute_consumed: vec![], + total_errored_units, }, ); - CostUpdateService::update_cost_model(&cost_model, &execute_timings); + CostUpdateService::update_cost_model(&cost_model, &mut execute_timings); assert_eq!( 1, cost_model @@ -282,9 +298,11 @@ mod tests { accumulated_us, accumulated_units, count, + errored_txs_compute_consumed: vec![], + total_errored_units: 0, }, ); - CostUpdateService::update_cost_model(&cost_model, &execute_timings); + CostUpdateService::update_cost_model(&cost_model, &mut execute_timings); assert_eq!( 1, cost_model @@ -303,4 +321,106 @@ mod tests { ); } } + + #[test] + fn test_update_cost_model_with_error_execute_timings() { + let cost_model = Arc::new(RwLock::new(CostModel::default())); + let mut execute_timings = ExecuteTimings::default(); + let program_key_1 = Pubkey::new_unique(); + + // Test updating cost model 
with a `ProgramTiming` with no compute units accumulated, i.e. + // `accumulated_units` == 0 + { + execute_timings.details.per_program_timings.insert( + program_key_1, + ProgramTiming { + accumulated_us: 1000, + accumulated_units: 0, + count: 0, + errored_txs_compute_consumed: vec![], + total_errored_units: 0, + }, + ); + CostUpdateService::update_cost_model(&cost_model, &mut execute_timings); + // If both the `errored_txs_compute_consumed` is empty and `count == 0`, then + // nothing should be inserted into the cost model + assert!(cost_model + .read() + .unwrap() + .get_instruction_cost_table() + .is_empty()); + } + + // Test updating cost model with only erroring compute costs where the `cost_per_error` is + // greater than the current instruction cost for the program. Should update with the + // new erroring compute costs + let cost_per_error = 1000; + { + let errored_txs_compute_consumed = vec![cost_per_error; 3]; + let total_errored_units = errored_txs_compute_consumed.iter().sum(); + execute_timings.details.per_program_timings.insert( + program_key_1, + ProgramTiming { + accumulated_us: 1000, + accumulated_units: 0, + count: 0, + errored_txs_compute_consumed, + total_errored_units, + }, + ); + CostUpdateService::update_cost_model(&cost_model, &mut execute_timings); + assert_eq!( + 1, + cost_model + .read() + .unwrap() + .get_instruction_cost_table() + .len() + ); + assert_eq!( + Some(&cost_per_error), + cost_model + .read() + .unwrap() + .get_instruction_cost_table() + .get(&program_key_1) + ); + } + + // Test updating cost model with only erroring compute costs where the error cost is + // `smaller_cost_per_error`, less than the current instruction cost for the program. 
+ // The cost should not decrease for these new lesser errors + let smaller_cost_per_error = cost_per_error - 10; + { + let errored_txs_compute_consumed = vec![smaller_cost_per_error; 3]; + let total_errored_units = errored_txs_compute_consumed.iter().sum(); + execute_timings.details.per_program_timings.insert( + program_key_1, + ProgramTiming { + accumulated_us: 1000, + accumulated_units: 0, + count: 0, + errored_txs_compute_consumed, + total_errored_units, + }, + ); + CostUpdateService::update_cost_model(&cost_model, &mut execute_timings); + assert_eq!( + 1, + cost_model + .read() + .unwrap() + .get_instruction_cost_table() + .len() + ); + assert_eq!( + Some(&cost_per_error), + cost_model + .read() + .unwrap() + .get_instruction_cost_table() + .get(&program_key_1) + ); + } + } } diff --git a/core/src/fetch_stage.rs b/core/src/fetch_stage.rs index 248d3bf65d6181..a560a5c2810b0d 100644 --- a/core/src/fetch_stage.rs +++ b/core/src/fetch_stage.rs @@ -6,10 +6,13 @@ use { result::{Error, Result}, }, solana_metrics::{inc_new_counter_debug, inc_new_counter_info}, - solana_perf::{packet::PacketsRecycler, recycler::Recycler}, + solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, solana_poh::poh_recorder::PohRecorder, - solana_sdk::clock::DEFAULT_TICKS_PER_SLOT, - solana_streamer::streamer::{self, PacketReceiver, PacketSender}, + solana_sdk::{ + clock::DEFAULT_TICKS_PER_SLOT, + packet::{Packet, PacketFlags}, + }, + solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender}, std::{ net::UdpSocket, sync::{ @@ -34,7 +37,7 @@ impl FetchStage { exit: &Arc, poh_recorder: &Arc>, coalesce_ms: u64, - ) -> (Self, PacketReceiver, PacketReceiver) { + ) -> (Self, PacketBatchReceiver, PacketBatchReceiver) { let (sender, receiver) = channel(); let (vote_sender, vote_receiver) = channel(); ( @@ -58,8 +61,8 @@ impl FetchStage { tpu_forwards_sockets: Vec, tpu_vote_sockets: Vec, exit: &Arc, - sender: &PacketSender, - vote_sender: &PacketSender, + sender: 
&PacketBatchSender, + vote_sender: &PacketBatchSender, poh_recorder: &Arc>, coalesce_ms: u64, ) -> Self { @@ -79,18 +82,24 @@ impl FetchStage { } fn handle_forwarded_packets( - recvr: &PacketReceiver, - sendr: &PacketSender, + recvr: &PacketBatchReceiver, + sendr: &PacketBatchSender, poh_recorder: &Arc>, ) -> Result<()> { - let msgs = recvr.recv()?; - let mut len = msgs.packets.len(); - let mut batch = vec![msgs]; - while let Ok(more) = recvr.try_recv() { - len += more.packets.len(); - batch.push(more); + let mark_forwarded = |packet: &mut Packet| { + packet.meta.flags |= PacketFlags::FORWARDED; + }; + + let mut packet_batch = recvr.recv()?; + let mut num_packets = packet_batch.packets.len(); + packet_batch.packets.iter_mut().for_each(mark_forwarded); + let mut packet_batches = vec![packet_batch]; + while let Ok(mut packet_batch) = recvr.try_recv() { + packet_batch.packets.iter_mut().for_each(mark_forwarded); + num_packets += packet_batch.packets.len(); + packet_batches.push(packet_batch); // Read at most 1K transactions in a loop - if len > 1024 { + if num_packets > 1024 { break; } } @@ -100,33 +109,33 @@ impl FetchStage { .unwrap() .would_be_leader(HOLD_TRANSACTIONS_SLOT_OFFSET.saturating_mul(DEFAULT_TICKS_PER_SLOT)) { - inc_new_counter_debug!("fetch_stage-honor_forwards", len); - for packets in batch { + inc_new_counter_debug!("fetch_stage-honor_forwards", num_packets); + for packet_batch in packet_batches { #[allow(clippy::question_mark)] - if sendr.send(packets).is_err() { + if sendr.send(packet_batch).is_err() { return Err(Error::Send); } } } else { - inc_new_counter_info!("fetch_stage-discard_forwards", len); + inc_new_counter_info!("fetch_stage-discard_forwards", num_packets); } Ok(()) } fn new_multi_socket( - sockets: Vec>, + tpu_sockets: Vec>, tpu_forwards_sockets: Vec>, tpu_vote_sockets: Vec>, exit: &Arc, - sender: &PacketSender, - vote_sender: &PacketSender, + sender: &PacketBatchSender, + vote_sender: &PacketBatchSender, poh_recorder: &Arc>, 
coalesce_ms: u64, ) -> Self { - let recycler: PacketsRecycler = Recycler::warmed(1000, 1024); + let recycler: PacketBatchRecycler = Recycler::warmed(1000, 1024); - let tpu_threads = sockets.into_iter().map(|socket| { + let tpu_threads = tpu_sockets.into_iter().map(|socket| { streamer::receiver( socket, exit, diff --git a/core/src/ledger_cleanup_service.rs b/core/src/ledger_cleanup_service.rs index 3486a24ad21677..841609d3cc3685 100644 --- a/core/src/ledger_cleanup_service.rs +++ b/core/src/ledger_cleanup_service.rs @@ -164,12 +164,9 @@ impl LedgerCleanupService { } fn receive_new_roots(new_root_receiver: &Receiver) -> Result { - let mut root = new_root_receiver.recv_timeout(Duration::from_secs(1))?; + let root = new_root_receiver.recv_timeout(Duration::from_secs(1))?; // Get the newest root - while let Ok(new_root) = new_root_receiver.try_recv() { - root = new_root; - } - Ok(root) + Ok(new_root_receiver.try_iter().last().unwrap_or(root)) } pub fn cleanup_ledger( diff --git a/core/src/progress_map.rs b/core/src/progress_map.rs index 11758dfbf73eb5..bea2ceb244853d 100644 --- a/core/src/progress_map.rs +++ b/core/src/progress_map.rs @@ -129,16 +129,19 @@ impl ReplaySlotStats { .iter() .collect(); per_pubkey_timings.sort_by(|a, b| b.1.accumulated_us.cmp(&a.1.accumulated_us)); - let (total_us, total_units, total_count) = - per_pubkey_timings - .iter() - .fold((0, 0, 0), |(sum_us, sum_units, sum_count), a| { + let (total_us, total_units, total_count, total_errored_units, total_errored_count) = + per_pubkey_timings.iter().fold( + (0, 0, 0, 0, 0), + |(sum_us, sum_units, sum_count, sum_errored_units, sum_errored_count), a| { ( sum_us + a.1.accumulated_us, sum_units + a.1.accumulated_units, sum_count + a.1.count, + sum_errored_units + a.1.total_errored_units, + sum_errored_count + a.1.errored_txs_compute_consumed.len(), ) - }); + }, + ); for (pubkey, time) in per_pubkey_timings.iter().take(5) { datapoint_info!( @@ -147,7 +150,13 @@ impl ReplaySlotStats { ("pubkey", 
pubkey.to_string(), String), ("execute_us", time.accumulated_us, i64), ("accumulated_units", time.accumulated_units, i64), - ("count", time.count, i64) + ("errored_units", time.total_errored_units, i64), + ("count", time.count, i64), + ( + "errored_count", + time.errored_txs_compute_consumed.len(), + i64 + ), ); } datapoint_info!( @@ -156,7 +165,9 @@ impl ReplaySlotStats { ("pubkey", "all", String), ("execute_us", total_us, i64), ("accumulated_units", total_units, i64), - ("count", total_count, i64) + ("count", total_count, i64), + ("errored_units", total_errored_units, i64), + ("count", total_errored_count, i64) ); } } diff --git a/core/src/qos_service.rs b/core/src/qos_service.rs index 435ed564411801..feea54cac153d3 100644 --- a/core/src/qos_service.rs +++ b/core/src/qos_service.rs @@ -78,13 +78,12 @@ impl QosService { pub fn compute_transaction_costs<'a>( &self, transactions: impl Iterator, - demote_program_write_locks: bool, ) -> Vec { let mut compute_cost_time = Measure::start("compute_cost_time"); let cost_model = self.cost_model.read().unwrap(); let txs_costs: Vec<_> = transactions .map(|tx| { - let cost = cost_model.calculate_cost(tx, demote_program_write_locks); + let cost = cost_model.calculate_cost(tx); debug!( "transaction {:?}, cost {:?}, cost sum {}", tx, @@ -250,7 +249,7 @@ mod tests { let cost_model = Arc::new(RwLock::new(CostModel::default())); let qos_service = QosService::new(cost_model.clone()); - let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false); + let txs_costs = qos_service.compute_transaction_costs(txs.iter()); // verify the size of txs_costs and its contents assert_eq!(txs_costs.len(), txs.len()); @@ -260,11 +259,7 @@ mod tests { .map(|(index, cost)| { assert_eq!( cost.sum(), - cost_model - .read() - .unwrap() - .calculate_cost(&txs[index], false) - .sum() + cost_model.read().unwrap().calculate_cost(&txs[index]).sum() ); }) .collect_vec(); @@ -295,14 +290,14 @@ mod tests { let transfer_tx_cost = cost_model .read() 
.unwrap() - .calculate_cost(&transfer_tx, false) + .calculate_cost(&transfer_tx) .sum(); // make a vec of txs let txs = vec![transfer_tx.clone(), vote_tx.clone(), transfer_tx, vote_tx]; let qos_service = QosService::new(cost_model); - let txs_costs = qos_service.compute_transaction_costs(txs.iter(), false); + let txs_costs = qos_service.compute_transaction_costs(txs.iter()); // set cost tracker limit to fit 1 transfer tx, vote tx bypasses limit check let cost_limit = transfer_tx_cost; @@ -348,7 +343,7 @@ mod tests { .name("test-producer-1".to_string()) .spawn(move || { debug!("thread 1 starts with {} txs", txs_1.len()); - let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter(), false); + let tx_costs = qos_service_1.compute_transaction_costs(txs_1.iter()); assert_eq!(txs_count, tx_costs.len()); debug!( "thread 1 done, generated {} count, see service count as {}", @@ -365,7 +360,7 @@ mod tests { .name("test-producer-2".to_string()) .spawn(move || { debug!("thread 2 starts with {} txs", txs_2.len()); - let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter(), false); + let tx_costs = qos_service_2.compute_transaction_costs(txs_2.iter()); assert_eq!(txs_count, tx_costs.len()); debug!( "thread 2 done, generated {} count, see service count as {}", diff --git a/core/src/repair_generic_traversal.rs b/core/src/repair_generic_traversal.rs index 8f35f67498e852..b780810a8cfc5b 100644 --- a/core/src/repair_generic_traversal.rs +++ b/core/src/repair_generic_traversal.rs @@ -57,7 +57,7 @@ pub fn get_unknown_last_index( .entry(slot) .or_insert_with(|| blockstore.meta(slot).unwrap()); if let Some(slot_meta) = slot_meta { - if slot_meta.known_last_index().is_none() { + if slot_meta.last_index.is_none() { let shred_index = blockstore.get_index(slot).unwrap(); let num_processed_shreds = if let Some(shred_index) = shred_index { shred_index.data().num_shreds() as u64 @@ -86,17 +86,17 @@ fn get_unrepaired_path( ) -> Vec { let mut path = Vec::new(); let mut slot = 
start_slot; - while !visited.contains(&slot) { - visited.insert(slot); + while visited.insert(slot) { let slot_meta = slot_meta_cache .entry(slot) .or_insert_with(|| blockstore.meta(slot).unwrap()); if let Some(slot_meta) = slot_meta { - if slot_meta.is_full() { - break; + if !slot_meta.is_full() { + path.push(slot); + if let Some(parent_slot) = slot_meta.parent_slot { + slot = parent_slot + } } - path.push(slot); - slot = slot_meta.parent_slot; } } path.reverse(); @@ -123,7 +123,7 @@ pub fn get_closest_completion( if slot_meta.is_full() { continue; } - if let Some(last_index) = slot_meta.known_last_index() { + if let Some(last_index) = slot_meta.last_index { let shred_index = blockstore.get_index(slot).unwrap(); let dist = if let Some(shred_index) = shred_index { let shred_count = shred_index.data().num_shreds() as u64; diff --git a/core/src/repair_response.rs b/core/src/repair_response.rs index 3c5c30aaee5cac..adefc3a5736792 100644 --- a/core/src/repair_response.rs +++ b/core/src/repair_response.rs @@ -56,7 +56,10 @@ mod test { shred::{Shred, Shredder}, sigverify_shreds::verify_shred_cpu, }, - solana_sdk::signature::{Keypair, Signer}, + solana_sdk::{ + packet::PacketFlags, + signature::{Keypair, Signer}, + }, std::{ collections::HashMap, net::{IpAddr, Ipv4Addr}, @@ -87,7 +90,7 @@ mod test { nonce, ) .unwrap(); - packet.meta.repair = true; + packet.meta.flags |= PacketFlags::REPAIR; let leader_slots = [(slot, keypair.pubkey().to_bytes())] .iter() diff --git a/core/src/repair_service.rs b/core/src/repair_service.rs index df5e25e7051cca..d87430423cba68 100644 --- a/core/src/repair_service.rs +++ b/core/src/repair_service.rs @@ -201,6 +201,7 @@ impl RepairService { blockstore: Arc, exit: Arc, repair_socket: Arc, + ancestor_hashes_socket: Arc, repair_info: RepairInfo, verified_vote_receiver: VerifiedVoteReceiver, outstanding_requests: Arc>, @@ -225,11 +226,10 @@ impl RepairService { .unwrap() }; - let ancestor_hashes_request_socket = 
Arc::new(UdpSocket::bind("0.0.0.0:0").unwrap()); let ancestor_hashes_service = AncestorHashesService::new( exit, blockstore, - ancestor_hashes_request_socket, + ancestor_hashes_socket, repair_info, ancestor_hashes_replay_update_receiver, ); diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index 83f633fd8ad611..9222b3a3a5ade1 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -26,6 +26,7 @@ use { voting_service::VoteOp, window_service::DuplicateSlotReceiver, }, + solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, solana_client::rpc_response::SlotUpdate, solana_entry::entry::VerifyRecyclers, solana_gossip::cluster_info::ClusterInfo, @@ -327,6 +328,7 @@ impl ReplayStage { cost_update_sender: Sender, voting_sender: Sender, drop_bank_sender: Sender>>, + block_metadata_notifier: Option, ) -> Self { let ReplayStageConfig { vote_account, @@ -432,6 +434,7 @@ impl ReplayStage { &cost_update_sender, &mut duplicate_slots_to_repair, &ancestor_hashes_replay_update_sender, + block_metadata_notifier.clone(), ); replay_active_banks_time.stop(); @@ -1459,7 +1462,7 @@ impl ReplayStage { ); let root_distance = poh_slot - root_slot; - const MAX_ROOT_DISTANCE_FOR_VOTE_ONLY: Slot = 500; + const MAX_ROOT_DISTANCE_FOR_VOTE_ONLY: Slot = 400; let vote_only_bank = if root_distance > MAX_ROOT_DISTANCE_FOR_VOTE_ONLY { datapoint_info!("vote-only-bank", ("slot", poh_slot, i64)); true @@ -1988,6 +1991,7 @@ impl ReplayStage { cost_update_sender: &Sender, duplicate_slots_to_repair: &mut DuplicateSlotsToRepair, ancestor_hashes_replay_update_sender: &AncestorHashesReplayUpdateSender, + block_metadata_notifier: Option, ) -> bool { let mut did_complete_bank = false; let mut tx_count = 0; @@ -2143,6 +2147,16 @@ impl ReplayStage { } } Self::record_rewards(&bank, rewards_recorder_sender); + if let Some(ref block_metadata_notifier) = block_metadata_notifier { + let block_metadata_notifier = 
block_metadata_notifier.read().unwrap(); + block_metadata_notifier.notify_block_metadata( + bank.slot(), + &bank.last_blockhash().to_string(), + &bank.rewards, + Some(bank.clock().unix_timestamp), + Some(bank.block_height()), + ) + } } else { trace!( "bank {} not completed tick_height: {}, max_tick_height: {}", @@ -3033,8 +3047,10 @@ pub mod tests { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(bank_forks); let exit = Arc::new(AtomicBool::new(false)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, @@ -3568,8 +3584,10 @@ pub mod tests { &replay_vote_sender, &VerifyRecyclers::default(), ); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -3636,8 +3654,10 @@ pub mod tests { let exit = Arc::new(AtomicBool::new(false)); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache.clone(), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), diff --git a/core/src/retransmit_stage.rs b/core/src/retransmit_stage.rs index dacf89e06dca87..da678dac8ea2c6 100644 --- a/core/src/retransmit_stage.rs +++ b/core/src/retransmit_stage.rs @@ -24,10 +24,10 @@ use { solana_ledger::{ blockstore::Blockstore, leader_schedule_cache::LeaderScheduleCache, - shred::{Shred, 
ShredType}, + shred::{Shred, ShredId}, }, solana_measure::measure::Measure, - solana_perf::packet::Packets, + solana_perf::packet::PacketBatch, solana_rayon_threadlimit::get_thread_count, solana_rpc::{max_slots::MaxSlots, rpc_subscriptions::RpcSubscriptions}, solana_runtime::{bank::Bank, bank_forks::BankForks}, @@ -145,13 +145,13 @@ impl RetransmitStats { } // Map of shred (slot, index, type) => list of hash values seen for that key. -type ShredFilter = LruCache<(Slot, u32, ShredType), Vec>; +type ShredFilter = LruCache>; type ShredFilterAndHasher = (ShredFilter, PacketHasher); // Returns true if shred is already received and should skip retransmit. fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex) -> bool { - let key = (shred.slot(), shred.index(), shred.shred_type()); + let key = shred.id(); let mut shreds_received = shreds_received.lock().unwrap(); let (cache, hasher) = shreds_received.deref_mut(); match cache.get_mut(&key) { @@ -433,7 +433,8 @@ impl RetransmitStage { cluster_info: Arc, retransmit_sockets: Arc>, repair_socket: Arc, - verified_receiver: Receiver>, + ancestor_hashes_socket: Arc, + verified_receiver: Receiver>, exit: Arc, cluster_slots_update_receiver: ClusterSlotsUpdateReceiver, epoch_schedule: EpochSchedule, @@ -486,6 +487,7 @@ impl RetransmitStage { verified_receiver, retransmit_sender, repair_socket, + ancestor_hashes_socket, exit, repair_info, leader_schedule_cache, @@ -610,10 +612,10 @@ mod tests { let shred = Shred::new_from_data(0, 0, 0, None, true, true, 0, 0x20, 0); // it should send this over the sockets. 
retransmit_sender.send(vec![shred]).unwrap(); - let mut packets = Packets::new(vec![]); - solana_streamer::packet::recv_from(&mut packets, &me_retransmit, 1).unwrap(); - assert_eq!(packets.packets.len(), 1); - assert!(!packets.packets[0].meta.repair); + let mut packet_batch = PacketBatch::new(vec![]); + solana_streamer::packet::recv_from(&mut packet_batch, &me_retransmit, 1).unwrap(); + assert_eq!(packet_batch.packets.len(), 1); + assert!(!packet_batch.packets[0].meta.repair()); } #[test] @@ -639,19 +641,19 @@ mod tests { assert!(should_skip_retransmit(&shred, &shreds_received)); assert!(should_skip_retransmit(&shred, &shreds_received)); - let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, version); + let shred = Shred::new_empty_coding(slot, index, 0, 1, 1, 0, version); // Coding at (1, 5) passes assert!(!should_skip_retransmit(&shred, &shreds_received)); // then blocked assert!(should_skip_retransmit(&shred, &shreds_received)); - let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, version); + let shred = Shred::new_empty_coding(slot, index, 2, 1, 1, 0, version); // 2nd unique coding at (1, 5) passes assert!(!should_skip_retransmit(&shred, &shreds_received)); // same again is blocked assert!(should_skip_retransmit(&shred, &shreds_received)); - let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, version); + let shred = Shred::new_empty_coding(slot, index, 3, 1, 1, 0, version); // Another unique coding at (1, 5) always blocked assert!(should_skip_retransmit(&shred, &shreds_received)); assert!(should_skip_retransmit(&shred, &shreds_received)); diff --git a/core/src/serve_repair.rs b/core/src/serve_repair.rs index 2086661e785818..f643302737448a 100644 --- a/core/src/serve_repair.rs +++ b/core/src/serve_repair.rs @@ -25,11 +25,11 @@ use { }, solana_measure::measure::Measure, solana_metrics::inc_new_counter_debug, - solana_perf::packet::{limited_deserialize, Packets, PacketsRecycler}, + solana_perf::packet::{limited_deserialize, PacketBatch, 
PacketBatchRecycler}, solana_sdk::{ clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, pubkey::Pubkey, timing::duration_as_ms, }, - solana_streamer::streamer::{PacketReceiver, PacketSender}, + solana_streamer::streamer::{PacketBatchReceiver, PacketBatchSender}, std::{ collections::HashSet, net::SocketAddr, @@ -229,12 +229,12 @@ impl ServeRepair { fn handle_repair( me: &Arc>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, request: RepairProtocol, stats: &mut ServeRepairStats, - ) -> Option { + ) -> Option { let now = Instant::now(); let my_id = me.read().unwrap().my_id(); @@ -317,10 +317,10 @@ impl ServeRepair { /// Process messages from the network fn run_listen( obj: &Arc>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, blockstore: Option<&Arc>, - requests_receiver: &PacketReceiver, - response_sender: &PacketSender, + requests_receiver: &PacketBatchReceiver, + response_sender: &PacketBatchSender, stats: &mut ServeRepairStats, max_packets: &mut usize, ) -> Result<()> { @@ -392,12 +392,12 @@ impl ServeRepair { pub fn listen( me: Arc>, blockstore: Option>, - requests_receiver: PacketReceiver, - response_sender: PacketSender, + requests_receiver: PacketBatchReceiver, + response_sender: PacketBatchSender, exit: &Arc, ) -> JoinHandle<()> { let exit = exit.clone(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); Builder::new() .name("solana-repair-listen".to_string()) .spawn(move || { @@ -432,14 +432,14 @@ impl ServeRepair { fn handle_packets( me: &Arc>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, blockstore: Option<&Arc>, - packets: Packets, - response_sender: &PacketSender, + packet_batch: PacketBatch, + response_sender: &PacketBatchSender, stats: &mut ServeRepairStats, ) { // iter over the packets - packets.packets.iter().for_each(|packet| { + packet_batch.packets.iter().for_each(|packet| { let from_addr = 
packet.meta.addr(); limited_deserialize(&packet.data[..packet.meta.size]) .into_iter() @@ -609,7 +609,7 @@ impl ServeRepair { } fn run_window_request( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from: &ContactInfo, from_addr: &SocketAddr, blockstore: Option<&Arc>, @@ -617,7 +617,7 @@ impl ServeRepair { slot: Slot, shred_index: u64, nonce: Nonce, - ) -> Option { + ) -> Option { if let Some(blockstore) = blockstore { // Try to find the requested index in one of the slots let packet = repair_response::repair_response_packet( @@ -630,7 +630,7 @@ impl ServeRepair { if let Some(packet) = packet { inc_new_counter_debug!("serve_repair-window-request-ledger", 1); - return Some(Packets::new_unpinned_with_recycler_data( + return Some(PacketBatch::new_unpinned_with_recycler_data( recycler, "run_window_request", vec![packet], @@ -651,13 +651,13 @@ impl ServeRepair { } fn run_highest_window_request( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, slot: Slot, highest_index: u64, nonce: Nonce, - ) -> Option { + ) -> Option { let blockstore = blockstore?; // Try to find the requested index in one of the slots let meta = blockstore.meta(slot).ok()??; @@ -670,7 +670,7 @@ impl ServeRepair { from_addr, nonce, )?; - return Some(Packets::new_unpinned_with_recycler_data( + return Some(PacketBatch::new_unpinned_with_recycler_data( recycler, "run_highest_window_request", vec![packet], @@ -680,14 +680,14 @@ impl ServeRepair { } fn run_orphan( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, mut slot: Slot, max_responses: usize, nonce: Nonce, - ) -> Option { - let mut res = Packets::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan"); + ) -> Option { + let mut res = PacketBatch::new_unpinned_with_recycler(recycler.clone(), 64, "run_orphan"); if let Some(blockstore) = blockstore { // Try to find the next "n" parent slots of the input 
slot while let Ok(Some(meta)) = blockstore.meta(slot) { @@ -706,8 +706,8 @@ impl ServeRepair { } else { break; } - if meta.is_parent_set() && res.packets.len() <= max_responses { - slot = meta.parent_slot; + if meta.parent_slot.is_some() && res.packets.len() <= max_responses { + slot = meta.parent_slot.unwrap(); } else { break; } @@ -720,12 +720,12 @@ impl ServeRepair { } fn run_ancestor_hashes( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, from_addr: &SocketAddr, blockstore: Option<&Arc>, slot: Slot, nonce: Nonce, - ) -> Option { + ) -> Option { let blockstore = blockstore?; let ancestor_slot_hashes = if blockstore.is_duplicate_confirmed(slot) { let ancestor_iterator = @@ -746,7 +746,7 @@ impl ServeRepair { from_addr, nonce, )?; - Some(Packets::new_unpinned_with_recycler_data( + Some(PacketBatch::new_unpinned_with_recycler_data( recycler, "run_ancestor_hashes", vec![packet], @@ -778,7 +778,7 @@ mod tests { /// test run_window_request responds with the right shred, and do not overrun fn run_highest_window_request(slot: Slot, num_slots: u64, nonce: Nonce) { - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { @@ -848,7 +848,7 @@ mod tests { /// test window requests respond with the right shred, and do not overrun fn run_window_request(slot: Slot, nonce: Nonce) { - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); solana_logger::setup(); let ledger_path = get_tmp_ledger_path!(); { @@ -1017,7 +1017,7 @@ mod tests { fn run_orphan(slot: Slot, num_slots: u64, nonce: Nonce) { solana_logger::setup(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); @@ -1091,7 +1091,7 @@ mod tests { #[test] fn run_orphan_corrupted_shred_size() { solana_logger::setup(); - 
let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { let blockstore = Arc::new(Blockstore::open(&ledger_path).unwrap()); @@ -1152,7 +1152,7 @@ mod tests { #[test] fn test_run_ancestor_hashes() { solana_logger::setup(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let ledger_path = get_tmp_ledger_path!(); { let slot = 0; diff --git a/core/src/shred_fetch_stage.rs b/core/src/shred_fetch_stage.rs index a0d02ba4a14caa..d231ae8b931ead 100644 --- a/core/src/shred_fetch_stage.rs +++ b/core/src/shred_fetch_stage.rs @@ -6,12 +6,12 @@ use { solana_ledger::shred::{get_shred_slot_index_type, ShredFetchStats}, solana_perf::{ cuda_runtime::PinnedVec, - packet::{Packet, PacketsRecycler}, + packet::{Packet, PacketBatchRecycler, PacketFlags}, recycler::Recycler, }, solana_runtime::bank_forks::BankForks, solana_sdk::clock::{Slot, DEFAULT_MS_PER_SLOT}, - solana_streamer::streamer::{self, PacketReceiver, PacketSender}, + solana_streamer::streamer::{self, PacketBatchReceiver, PacketBatchSender}, std::{ net::UdpSocket, sync::{atomic::AtomicBool, mpsc::channel, Arc, RwLock}, @@ -40,7 +40,7 @@ impl ShredFetchStage { ) where F: Fn(&mut Packet), { - p.meta.discard = true; + p.meta.set_discard(true); if let Some((slot, _index, _shred_type)) = get_shred_slot_index_type(p, stats) { // Seems reasonable to limit shreds to 2 epochs away if slot > last_root && slot < (last_slot + 2 * slots_per_epoch) { @@ -50,7 +50,7 @@ impl ShredFetchStage { if shreds_received.get(&hash).is_none() { shreds_received.put(hash, ()); - p.meta.discard = false; + p.meta.set_discard(false); modify(p); } else { stats.duplicate_shred += 1; @@ -63,8 +63,8 @@ impl ShredFetchStage { // updates packets received on a channel and sends them on another channel fn modify_packets( - recvr: PacketReceiver, - sendr: PacketSender, + recvr: PacketBatchReceiver, + sendr: PacketBatchSender, bank_forks: 
Option>>, name: &'static str, modify: F, @@ -83,7 +83,7 @@ impl ShredFetchStage { let mut stats = ShredFetchStats::default(); let mut packet_hasher = PacketHasher::default(); - while let Some(mut p) = recvr.iter().next() { + while let Some(mut packet_batch) = recvr.iter().next() { if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT { last_updated = Instant::now(); packet_hasher.reset(); @@ -97,8 +97,8 @@ impl ShredFetchStage { slots_per_epoch = root_bank.get_slots_in_epoch(root_bank.epoch()); } } - stats.shred_count += p.packets.len(); - p.packets.iter_mut().for_each(|packet| { + stats.shred_count += packet_batch.packets.len(); + packet_batch.packets.iter_mut().for_each(|packet| { Self::process_packet( packet, &mut shreds_received, @@ -124,7 +124,7 @@ impl ShredFetchStage { stats = ShredFetchStats::default(); last_stats = Instant::now(); } - if sendr.send(p).is_err() { + if sendr.send(packet_batch).is_err() { break; } } @@ -133,7 +133,7 @@ impl ShredFetchStage { fn packet_modifier( sockets: Vec>, exit: &Arc, - sender: PacketSender, + sender: PacketBatchSender, recycler: Recycler>, bank_forks: Option>>, name: &'static str, @@ -169,11 +169,11 @@ impl ShredFetchStage { sockets: Vec>, forward_sockets: Vec>, repair_socket: Arc, - sender: &PacketSender, + sender: &PacketBatchSender, bank_forks: Option>>, exit: &Arc, ) -> Self { - let recycler: PacketsRecycler = Recycler::warmed(100, 1024); + let recycler: PacketBatchRecycler = Recycler::warmed(100, 1024); let (mut tvu_threads, tvu_filter) = Self::packet_modifier( sockets, @@ -192,7 +192,7 @@ impl ShredFetchStage { recycler.clone(), bank_forks.clone(), "shred_fetch_tvu_forwards", - |p| p.meta.forward = true, + |p| p.meta.flags.insert(PacketFlags::FORWARDED), ); let (repair_receiver, repair_handler) = Self::packet_modifier( @@ -202,7 +202,7 @@ impl ShredFetchStage { recycler, bank_forks, "shred_fetch_repair", - |p| p.meta.repair = true, + |p| p.meta.flags.insert(PacketFlags::REPAIR), ); 
tvu_threads.extend(tvu_forwards_threads.into_iter()); @@ -266,10 +266,11 @@ mod tests { &|_p| {}, &hasher, ); - assert!(!packet.meta.discard); + assert!(!packet.meta.discard()); let coding = solana_ledger::shred::Shredder::generate_coding_shreds( &[shred], false, // is_last_in_slot + 3, // next_code_index ); coding[0].copy_to_packet(&mut packet); ShredFetchStage::process_packet( @@ -282,7 +283,7 @@ mod tests { &|_p| {}, &hasher, ); - assert!(!packet.meta.discard); + assert!(!packet.meta.discard()); } #[test] @@ -309,7 +310,7 @@ mod tests { &hasher, ); assert_eq!(stats.index_overrun, 1); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); let shred = Shred::new_from_data(1, 3, 0, None, true, true, 0, 0, 0); shred.copy_to_packet(&mut packet); @@ -324,7 +325,7 @@ mod tests { &|_p| {}, &hasher, ); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); // Accepted for 1,3 ShredFetchStage::process_packet( @@ -337,7 +338,7 @@ mod tests { &|_p| {}, &hasher, ); - assert!(!packet.meta.discard); + assert!(!packet.meta.discard()); // shreds_received should filter duplicate ShredFetchStage::process_packet( @@ -350,7 +351,7 @@ mod tests { &|_p| {}, &hasher, ); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); let shred = Shred::new_from_data(1_000_000, 3, 0, None, true, true, 0, 0, 0); shred.copy_to_packet(&mut packet); @@ -366,7 +367,7 @@ mod tests { &|_p| {}, &hasher, ); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); let index = MAX_DATA_SHREDS_PER_SLOT as u32; let shred = Shred::new_from_data(5, index, 0, None, true, true, 0, 0, 0); @@ -381,6 +382,6 @@ mod tests { &|_p| {}, &hasher, ); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); } } diff --git a/core/src/sigverify.rs b/core/src/sigverify.rs index 8ffa30bb84168a..74dbf5bdfc80ff 100644 --- a/core/src/sigverify.rs +++ b/core/src/sigverify.rs @@ -5,11 +5,11 @@ //! 
pub use solana_perf::sigverify::{ - batch_size, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset, + count_packets_in_batches, ed25519_verify_cpu, ed25519_verify_disabled, init, TxOffset, }; use { crate::sigverify_stage::SigVerifier, - solana_perf::{cuda_runtime::PinnedVec, packet::Packets, recycler::Recycler, sigverify}, + solana_perf::{cuda_runtime::PinnedVec, packet::PacketBatch, recycler::Recycler, sigverify}, }; #[derive(Clone)] @@ -40,13 +40,13 @@ impl Default for TransactionSigVerifier { } impl SigVerifier for TransactionSigVerifier { - fn verify_batch(&self, mut batch: Vec) -> Vec { + fn verify_batches(&self, mut batches: Vec) -> Vec { sigverify::ed25519_verify( - &mut batch, + &mut batches, &self.recycler, &self.recycler_out, self.reject_non_vote, ); - batch + batches } } diff --git a/core/src/sigverify_shreds.rs b/core/src/sigverify_shreds.rs index 08ebae0bb22d8d..626dc516f56391 100644 --- a/core/src/sigverify_shreds.rs +++ b/core/src/sigverify_shreds.rs @@ -5,7 +5,7 @@ use { leader_schedule_cache::LeaderScheduleCache, shred::Shred, sigverify_shreds::verify_shreds_gpu, }, - solana_perf::{self, packet::Packets, recycler_cache::RecyclerCache}, + solana_perf::{self, packet::PacketBatch, recycler_cache::RecyclerCache}, solana_runtime::bank_forks::BankForks, std::{ collections::{HashMap, HashSet}, @@ -32,7 +32,7 @@ impl ShredSigVerifier { recycler_cache: RecyclerCache::warmed(), } } - fn read_slots(batches: &[Packets]) -> HashSet { + fn read_slots(batches: &[PacketBatch]) -> HashSet { batches .iter() .flat_map(|batch| batch.packets.iter().filter_map(Shred::get_slot_from_packet)) @@ -41,7 +41,7 @@ impl ShredSigVerifier { } impl SigVerifier for ShredSigVerifier { - fn verify_batch(&self, mut batches: Vec) -> Vec { + fn verify_batches(&self, mut batches: Vec) -> Vec { let r_bank = self.bank_forks.read().unwrap().working_bank(); let slots: HashSet = Self::read_slots(&batches); let mut leader_slots: HashMap = slots @@ -88,13 +88,13 @@ pub mod tests { 0, 
0xc0de, ); - let mut batch = [Packets::default(), Packets::default()]; + let mut batches = [PacketBatch::default(), PacketBatch::default()]; let keypair = Keypair::new(); Shredder::sign_shred(&keypair, &mut shred); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let mut shred = Shred::new_from_data( 0xc0de_dead, @@ -108,16 +108,16 @@ pub mod tests { 0xc0de, ); Shredder::sign_shred(&keypair, &mut shred); - batch[1].packets.resize(1, Packet::default()); - batch[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[1].packets[0].meta.size = shred.payload.len(); + batches[1].packets.resize(1, Packet::default()); + batches[1].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[1].packets[0].meta.size = shred.payload.len(); let expected: HashSet = [0xc0de_dead, 0xdead_c0de].iter().cloned().collect(); - assert_eq!(ShredSigVerifier::read_slots(&batch), expected); + assert_eq!(ShredSigVerifier::read_slots(&batches), expected); } #[test] - fn test_sigverify_shreds_verify_batch() { + fn test_sigverify_shreds_verify_batches() { let leader_keypair = Arc::new(Keypair::new()); let leader_pubkey = leader_keypair.pubkey(); let bank = Bank::new_for_tests( @@ -127,8 +127,8 @@ pub mod tests { let bf = Arc::new(RwLock::new(BankForks::new(bank))); let verifier = ShredSigVerifier::new(bf, cache); - let mut batch = vec![Packets::default()]; - batch[0].packets.resize(2, Packet::default()); + let mut batches = vec![PacketBatch::default()]; + batches[0].packets.resize(2, Packet::default()); let mut shred = Shred::new_from_data( 0, @@ -142,8 +142,8 @@ pub mod tests { 0xc0de, ); 
Shredder::sign_shred(&leader_keypair, &mut shred); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let mut shred = Shred::new_from_data( 0, @@ -158,11 +158,11 @@ pub mod tests { ); let wrong_keypair = Keypair::new(); Shredder::sign_shred(&wrong_keypair, &mut shred); - batch[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[1].meta.size = shred.payload.len(); + batches[0].packets[1].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[1].meta.size = shred.payload.len(); - let rv = verifier.verify_batch(batch); - assert!(!rv[0].packets[0].meta.discard); - assert!(rv[0].packets[1].meta.discard); + let rv = verifier.verify_batches(batches); + assert!(!rv[0].packets[0].meta.discard()); + assert!(rv[0].packets[1].meta.discard()); } } diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs index 9810c5dc270852..62f190de8a77f4 100644 --- a/core/src/sigverify_stage.rs +++ b/core/src/sigverify_stage.rs @@ -9,9 +9,9 @@ use { crate::sigverify, crossbeam_channel::{SendError, Sender as CrossbeamSender}, solana_measure::measure::Measure, - solana_perf::packet::Packets, + solana_perf::packet::PacketBatch, solana_sdk::timing, - solana_streamer::streamer::{self, PacketReceiver, StreamerError}, + solana_streamer::streamer::{self, PacketBatchReceiver, StreamerError}, std::{ collections::HashMap, sync::mpsc::{Receiver, RecvTimeoutError}, @@ -26,7 +26,7 @@ const MAX_SIGVERIFY_BATCH: usize = 10_000; #[derive(Error, Debug)] pub enum SigVerifyServiceError { #[error("send packets batch error")] - Send(#[from] SendError>), + Send(#[from] SendError>), #[error("streamer error")] Streamer(#[from] StreamerError), @@ -39,7 +39,7 @@ pub struct SigVerifyStage { } pub trait 
SigVerifier { - fn verify_batch(&self, batch: Vec) -> Vec; + fn verify_batches(&self, batches: Vec) -> Vec; } #[derive(Default, Clone)] @@ -49,7 +49,7 @@ pub struct DisabledSigVerifier {} struct SigVerifierStats { recv_batches_us_hist: histogram::Histogram, // time to call recv_batch verify_batches_pp_us_hist: histogram::Histogram, // per-packet time to call verify_batch - batches_hist: histogram::Histogram, // number of Packets structures per verify call + batches_hist: histogram::Histogram, // number of packet batches per verify call packets_hist: histogram::Histogram, // number of packets per verify call total_batches: usize, total_packets: usize, @@ -122,24 +122,24 @@ impl SigVerifierStats { } impl SigVerifier for DisabledSigVerifier { - fn verify_batch(&self, mut batch: Vec) -> Vec { - sigverify::ed25519_verify_disabled(&mut batch); - batch + fn verify_batches(&self, mut batches: Vec) -> Vec { + sigverify::ed25519_verify_disabled(&mut batches); + batches } } impl SigVerifyStage { #[allow(clippy::new_ret_no_self)] pub fn new( - packet_receiver: Receiver, - verified_sender: CrossbeamSender>, + packet_receiver: Receiver, + verified_sender: CrossbeamSender>, verifier: T, ) -> Self { let thread_hdl = Self::verifier_services(packet_receiver, verified_sender, verifier); Self { thread_hdl } } - pub fn discard_excess_packets(batches: &mut Vec, max_packets: usize) { + pub fn discard_excess_packets(batches: &mut Vec, max_packets: usize) { let mut received_ips = HashMap::new(); for (batch_index, batch) in batches.iter().enumerate() { for (packet_index, packets) in batch.packets.iter().enumerate() { @@ -163,18 +163,20 @@ impl SigVerifyStage { } for (_addr, indexes) in received_ips { for (batch_index, packet_index) in indexes { - batches[batch_index].packets[packet_index].meta.discard = true; + batches[batch_index].packets[packet_index] + .meta + .set_discard(true); } } } fn verifier( - recvr: &PacketReceiver, - sendr: &CrossbeamSender>, + recvr: &PacketBatchReceiver, + 
sendr: &CrossbeamSender>, verifier: &T, stats: &mut SigVerifierStats, ) -> Result<()> { - let (mut batches, num_packets, recv_duration) = streamer::recv_batch(recvr)?; + let (mut batches, num_packets, recv_duration) = streamer::recv_packet_batches(recvr)?; let batches_len = batches.len(); debug!( @@ -187,7 +189,7 @@ impl SigVerifyStage { } let mut verify_batch_time = Measure::start("sigverify_batch_time"); - sendr.send(verifier.verify_batch(batches))?; + sendr.send(verifier.verify_batches(batches))?; verify_batch_time.stop(); debug!( @@ -216,8 +218,8 @@ impl SigVerifyStage { } fn verifier_service( - packet_receiver: PacketReceiver, - verified_sender: CrossbeamSender>, + packet_receiver: PacketBatchReceiver, + verified_sender: CrossbeamSender>, verifier: &T, ) -> JoinHandle<()> { let verifier = verifier.clone(); @@ -252,8 +254,8 @@ impl SigVerifyStage { } fn verifier_services( - packet_receiver: PacketReceiver, - verified_sender: CrossbeamSender>, + packet_receiver: PacketBatchReceiver, + verified_sender: CrossbeamSender>, verifier: T, ) -> JoinHandle<()> { Self::verifier_service(packet_receiver, verified_sender, &verifier) @@ -268,13 +270,14 @@ impl SigVerifyStage { mod tests { use {super::*, solana_perf::packet::Packet}; - fn count_non_discard(packets: &[Packets]) -> usize { - packets + fn count_non_discard(packet_batches: &[PacketBatch]) -> usize { + packet_batches .iter() - .map(|pp| { - pp.packets + .map(|batch| { + batch + .packets .iter() - .map(|p| if p.meta.discard { 0 } else { 1 }) + .map(|p| if p.meta.discard() { 0 } else { 1 }) .sum::() }) .sum::() @@ -283,14 +286,14 @@ mod tests { #[test] fn test_packet_discard() { solana_logger::setup(); - let mut p = Packets::default(); - p.packets.resize(10, Packet::default()); - p.packets[3].meta.addr = [1u16; 8]; - let mut packets = vec![p]; + let mut batch = PacketBatch::default(); + batch.packets.resize(10, Packet::default()); + batch.packets[3].meta.addr = std::net::IpAddr::from([1u16; 8]); + let mut batches = 
vec![batch]; let max = 3; - SigVerifyStage::discard_excess_packets(&mut packets, max); - assert_eq!(count_non_discard(&packets), max); - assert!(!packets[0].packets[0].meta.discard); - assert!(!packets[0].packets[3].meta.discard); + SigVerifyStage::discard_excess_packets(&mut batches, max); + assert_eq!(count_non_discard(&batches), max); + assert!(!batches[0].packets[0].meta.discard()); + assert!(!batches[0].packets[3].meta.discard()); } } diff --git a/core/src/system_monitor_service.rs b/core/src/system_monitor_service.rs index 6e5d519bda2400..06ce178218dd98 100644 --- a/core/src/system_monitor_service.rs +++ b/core/src/system_monitor_service.rs @@ -15,7 +15,7 @@ use { }; const MS_PER_S: u64 = 1_000; -const SAMPLE_INTERVAL_UDP_MS: u64 = 60 * MS_PER_S; +const SAMPLE_INTERVAL_UDP_MS: u64 = 2 * MS_PER_S; const SAMPLE_INTERVAL_MEM_MS: u64 = MS_PER_S; const SLEEP_INTERVAL: Duration = Duration::from_millis(500); @@ -130,7 +130,7 @@ impl SystemMonitorService { #[cfg(target_os = "linux")] fn report_udp_stats(old_stats: &UdpStats, new_stats: &UdpStats) { datapoint_info!( - "net-stats", + "net-stats-validator", ( "in_datagrams_delta", new_stats.in_datagrams - old_stats.in_datagrams, diff --git a/core/src/tpu.rs b/core/src/tpu.rs index 4b4eadf92cdc9d..9ffbd6294707fc 100644 --- a/core/src/tpu.rs +++ b/core/src/tpu.rs @@ -109,10 +109,10 @@ impl Tpu { let (verified_gossip_vote_packets_sender, verified_gossip_vote_packets_receiver) = unbounded(); let cluster_info_vote_listener = ClusterInfoVoteListener::new( - exit, + exit.clone(), cluster_info.clone(), verified_gossip_vote_packets_sender, - poh_recorder, + poh_recorder.clone(), vote_tracker, bank_forks.clone(), subscriptions.clone(), diff --git a/core/src/tvu.rs b/core/src/tvu.rs index 9ba7b93e6f32d2..6b516f2fbed1f0 100644 --- a/core/src/tvu.rs +++ b/core/src/tvu.rs @@ -26,6 +26,7 @@ use { voting_service::VotingService, }, crossbeam_channel::unbounded, + 
solana_accountsdb_plugin_manager::block_metadata_notifier_interface::BlockMetadataNotifierLock, solana_gossip::cluster_info::ClusterInfo, solana_ledger::{ blockstore::Blockstore, blockstore_processor::TransactionStatusSender, @@ -82,6 +83,7 @@ pub struct Sockets { pub repair: UdpSocket, pub retransmit: Vec, pub forwards: Vec, + pub ancestor_hashes_requests: UdpSocket, } #[derive(Default)] @@ -142,17 +144,20 @@ impl Tvu { cost_model: &Arc>, accounts_package_channel: (AccountsPackageSender, AccountsPackageReceiver), last_full_snapshot_slot: Option, + block_metadata_notifier: Option, ) -> Self { let Sockets { repair: repair_socket, fetch: fetch_sockets, retransmit: retransmit_sockets, forwards: tvu_forward_sockets, + ancestor_hashes_requests: ancestor_hashes_socket, } = sockets; let (fetch_sender, fetch_receiver) = channel(); let repair_socket = Arc::new(repair_socket); + let ancestor_hashes_socket = Arc::new(ancestor_hashes_socket); let fetch_sockets: Vec> = fetch_sockets.into_iter().map(Arc::new).collect(); let forward_sockets: Vec> = tvu_forward_sockets.into_iter().map(Arc::new).collect(); @@ -187,6 +192,7 @@ impl Tvu { cluster_info.clone(), Arc::new(retransmit_sockets), repair_socket, + ancestor_hashes_socket, verified_receiver, exit.clone(), cluster_slots_update_receiver, @@ -329,6 +335,7 @@ impl Tvu { cost_update_sender, voting_sender, drop_bank_sender, + block_metadata_notifier, ); let ledger_cleanup_service = tvu_config.max_ledger_shreds.map(|max_ledger_shreds| { @@ -399,6 +406,7 @@ pub mod tests { solana_runtime::bank::Bank, solana_sdk::signature::{Keypair, Signer}, solana_streamer::socket::SocketAddrSpace, + std::sync::atomic::AtomicU64, std::sync::atomic::Ordering, }; @@ -448,6 +456,7 @@ pub mod tests { let bank_forks = Arc::new(RwLock::new(bank_forks)); let tower = Tower::default(); let accounts_package_channel = channel(); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let tvu = Tvu::new( &vote_keypair.pubkey(), 
Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])), @@ -459,12 +468,14 @@ pub mod tests { retransmit: target1.sockets.retransmit_sockets, fetch: target1.sockets.tvu, forwards: target1.sockets.tvu_forwards, + ancestor_hashes_requests: target1.sockets.ancestor_hashes_requests, } }, blockstore, ledger_signal_receiver, &Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache.clone(), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -493,6 +504,7 @@ pub mod tests { &Arc::new(RwLock::new(CostModel::default())), accounts_package_channel, None, + None, ); exit.store(true, Ordering::Relaxed); tvu.join().unwrap(); diff --git a/core/src/validator.rs b/core/src/validator.rs index f447690e806a53..09f77e6a196002 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -425,6 +425,13 @@ impl Validator { accountsdb_plugin_service.get_transaction_notifier() }); + let block_metadata_notifier = + accountsdb_plugin_service + .as_ref() + .and_then(|accountsdb_plugin_service| { + accountsdb_plugin_service.get_block_metadata_notifier() + }); + info!( "AccountsDb plugin: accounts_update_notifier: {} transaction_notifier: {}", accounts_update_notifier.is_some(), @@ -538,6 +545,8 @@ impl Validator { let rpc_subscriptions = Arc::new(RpcSubscriptions::new_with_config( &exit, + max_complete_transaction_status_slot.clone(), + blockstore.clone(), bank_forks.clone(), block_commitment_cache.clone(), optimistically_confirmed_bank.clone(), @@ -826,6 +835,11 @@ impl Validator { .iter() .map(|s| s.try_clone().expect("Failed to clone TVU forwards Sockets")) .collect(), + ancestor_hashes_requests: node + .sockets + .ancestor_hashes_requests + .try_clone() + .expect("Failed to clone ancestor_hashes_requests socket"), }, blockstore.clone(), ledger_signal_receiver, @@ -869,6 +883,7 @@ impl Validator { &cost_model, accounts_package_channel, last_full_snapshot_slot, + block_metadata_notifier, ); let 
tpu = Tpu::new( diff --git a/core/src/verified_vote_packets.rs b/core/src/verified_vote_packets.rs index 13ffe50ec1dc33..2db544df3f0395 100644 --- a/core/src/verified_vote_packets.rs +++ b/core/src/verified_vote_packets.rs @@ -1,7 +1,6 @@ use { crate::{cluster_info_vote_listener::VerifiedLabelVotePacketsReceiver, result::Result}, - crossbeam_channel::Select, - solana_perf::packet::Packets, + solana_perf::packet::PacketBatch, solana_runtime::bank::Bank, solana_sdk::{ account::from_account, clock::Slot, hash::Hash, pubkey::Pubkey, signature::Signature, @@ -20,7 +19,7 @@ const MAX_VOTES_PER_VALIDATOR: usize = 1000; pub struct VerifiedVoteMetadata { pub vote_account_key: Pubkey, pub vote: Vote, - pub packet: Packets, + pub packet_batch: PacketBatch, pub signature: Signature, } @@ -70,7 +69,7 @@ impl<'a> ValidatorGossipVotesIterator<'a> { /// /// Iterator is done after iterating through all vote accounts impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { - type Item = Vec; + type Item = Vec; fn next(&mut self) -> Option { // TODO: Maybe prioritize by stake weight @@ -116,7 +115,7 @@ impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { None } }) - .collect::>() + .collect::>() }) }) }); @@ -130,7 +129,7 @@ impl<'a> Iterator for ValidatorGossipVotesIterator<'a> { } } -pub type SingleValidatorVotes = BTreeMap<(Slot, Hash), (Packets, Signature)>; +pub type SingleValidatorVotes = BTreeMap<(Slot, Hash), (PacketBatch, Signature)>; #[derive(Default)] pub struct VerifiedVotePackets(HashMap); @@ -141,16 +140,16 @@ impl VerifiedVotePackets { vote_packets_receiver: &VerifiedLabelVotePacketsReceiver, would_be_leader: bool, ) -> Result<()> { - let mut sel = Select::new(); - sel.recv(vote_packets_receiver); - let _ = sel.ready_timeout(Duration::from_millis(200))?; - for gossip_votes in vote_packets_receiver.try_iter() { + const RECV_TIMEOUT: Duration = Duration::from_millis(200); + let vote_packets = vote_packets_receiver.recv_timeout(RECV_TIMEOUT)?; + let vote_packets = 
std::iter::once(vote_packets).chain(vote_packets_receiver.try_iter()); + for gossip_votes in vote_packets { if would_be_leader { for verfied_vote_metadata in gossip_votes { let VerifiedVoteMetadata { vote_account_key, vote, - packet, + packet_batch, signature, } = verfied_vote_metadata; if vote.slots.is_empty() { @@ -161,7 +160,7 @@ impl VerifiedVotePackets { let hash = vote.hash; let validator_votes = self.0.entry(vote_account_key).or_default(); - validator_votes.insert((*slot, hash), (packet, signature)); + validator_votes.insert((*slot, hash), (packet_batch, signature)); if validator_votes.len() > MAX_VOTES_PER_VALIDATOR { let smallest_key = validator_votes.keys().next().cloned().unwrap(); @@ -199,7 +198,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote: vote.clone(), - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -219,7 +218,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -241,7 +240,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), }]) .unwrap(); @@ -264,7 +263,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[2u8; 64]), }]) .unwrap(); @@ -283,7 +282,7 @@ mod tests { // No new messages, should time out assert_matches!( verified_vote_packets.receive_and_process_vote_packets(&r, true), - Err(Error::ReadyTimeout) + Err(Error::CrossbeamRecvTimeout(_)) ); } @@ -303,7 +302,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new(&[1u8; 64]), 
}]) .unwrap(); @@ -340,7 +339,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new_unique(), }]) .unwrap(); @@ -394,7 +393,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::new(vec![Packet::default(); num_packets]), + packet_batch: PacketBatch::new(vec![Packet::default(); num_packets]), signature: Signature::new_unique(), }]) .unwrap(); @@ -427,12 +426,12 @@ mod tests { // Get and verify batches let num_expected_batches = 2; for _ in 0..num_expected_batches { - let validator_batch: Vec = gossip_votes_iterator.next().unwrap(); + let validator_batch: Vec = gossip_votes_iterator.next().unwrap(); assert_eq!(validator_batch.len(), slot_hashes.slot_hashes().len()); let expected_len = validator_batch[0].packets.len(); assert!(validator_batch .iter() - .all(|p| p.packets.len() == expected_len)); + .all(|batch| batch.packets.len() == expected_len)); } // Should be empty now @@ -461,7 +460,7 @@ mod tests { s.send(vec![VerifiedVoteMetadata { vote_account_key, vote, - packet: Packets::default(), + packet_batch: PacketBatch::default(), signature: Signature::new_unique(), }]) .unwrap(); diff --git a/core/src/vote_simulator.rs b/core/src/vote_simulator.rs index 0c9e4ca7403346..c3cb9286a92bb7 100644 --- a/core/src/vote_simulator.rs +++ b/core/src/vote_simulator.rs @@ -343,7 +343,7 @@ pub fn initialize_state( let GenesisConfigInfo { mut genesis_config, mint_keypair, - voting_keypair: _, + .. 
} = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, diff --git a/core/src/window_service.rs b/core/src/window_service.rs index e56b333bdb4895..7a3e2c5cb07293 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -22,7 +22,7 @@ use { }, solana_measure::measure::Measure, solana_metrics::{inc_new_counter_debug, inc_new_counter_error}, - solana_perf::packet::{Packet, Packets}, + solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, solana_runtime::{bank::Bank, bank_forks::BankForks}, solana_sdk::{clock::Slot, packet::PACKET_DATA_SIZE, pubkey::Pubkey}, @@ -164,8 +164,8 @@ fn verify_shred_slot(shred: &Shred, root: u64) -> bool { match shred.shred_type() { // Only data shreds have parent information ShredType::Data => match shred.parent() { - Some(parent) => blockstore::verify_shred_slots(shred.slot(), parent, root), - None => false, + Ok(parent) => blockstore::verify_shred_slots(shred.slot(), parent, root), + Err(_) => false, }, // Filter out outdated coding shreds ShredType::Code => shred.slot() >= root, @@ -217,12 +217,9 @@ fn run_check_duplicate( let check_duplicate = |shred: Shred| -> Result<()> { let shred_slot = shred.slot(); if !blockstore.has_duplicate_shreds_in_slot(shred_slot) { - if let Some(existing_shred_payload) = blockstore.is_shred_duplicate( - shred_slot, - shred.index(), - shred.payload.clone(), - shred.shred_type(), - ) { + if let Some(existing_shred_payload) = + blockstore.is_shred_duplicate(shred.id(), shred.payload.clone()) + { cluster_info.push_duplicate_shred(&shred, &existing_shred_payload)?; blockstore.store_duplicate_slot( shred_slot, @@ -236,14 +233,10 @@ fn run_check_duplicate( Ok(()) }; - let timer = Duration::from_millis(200); - let shred = shred_receiver.recv_timeout(timer)?; - check_duplicate(shred)?; - while let Ok(shred) = shred_receiver.try_recv() { - check_duplicate(shred)?; - } - - Ok(()) + const RECV_TIMEOUT: Duration = Duration::from_millis(200); 
+ std::iter::once(shred_receiver.recv_timeout(RECV_TIMEOUT)?) + .chain(shred_receiver.try_iter()) + .try_for_each(check_duplicate) } fn verify_repair( @@ -353,7 +346,7 @@ fn recv_window( blockstore: &Blockstore, bank_forks: &RwLock, insert_shred_sender: &CrossbeamSender<(Vec, Vec>)>, - verified_receiver: &CrossbeamReceiver>, + verified_receiver: &CrossbeamReceiver>, retransmit_sender: &Sender>, shred_filter: F, thread_pool: &ThreadPool, @@ -369,7 +362,7 @@ where let last_root = blockstore.last_root(); let working_bank = bank_forks.read().unwrap().working_bank(); let handle_packet = |packet: &Packet| { - if packet.meta.discard { + if packet.meta.discard() { inc_new_counter_debug!("streamer-recv_window-invalid_or_unnecessary_packet", 1); return None; } @@ -382,7 +375,7 @@ where if !shred_filter(&shred, working_bank.clone(), last_root) { return None; } - if packet.meta.repair { + if packet.meta.repair() { let repair_info = RepairMeta { _from_addr: packet.meta.addr(), // If can't parse the nonce, dump the packet. 
@@ -458,9 +451,10 @@ impl WindowService { #[allow(clippy::too_many_arguments)] pub(crate) fn new( blockstore: Arc, - verified_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, retransmit_sender: Sender>, repair_socket: Arc, + ancestor_hashes_socket: Arc, exit: Arc, repair_info: RepairInfo, leader_schedule_cache: Arc, @@ -486,6 +480,7 @@ impl WindowService { blockstore.clone(), exit.clone(), repair_socket, + ancestor_hashes_socket, repair_info, verified_vote_receiver, outstanding_requests.clone(), @@ -629,7 +624,7 @@ impl WindowService { exit: Arc, blockstore: Arc, insert_sender: CrossbeamSender<(Vec, Vec>)>, - verified_receiver: CrossbeamReceiver>, + verified_receiver: CrossbeamReceiver>, shred_filter: F, bank_forks: Arc>, retransmit_sender: Sender>, @@ -743,7 +738,12 @@ mod test { keypair: &Keypair, ) -> Vec { let shredder = Shredder::new(slot, parent, 0, 0).unwrap(); - shredder.entries_to_shreds(keypair, entries, true, 0).0 + let (data_shreds, _) = shredder.entries_to_shreds( + keypair, entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); + data_shreds } #[test] @@ -878,7 +878,15 @@ mod test { )); // coding shreds don't contain parent slot information, test that slot >= root - let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0); + let (common, coding) = Shredder::new_coding_shred_header( + 5, // slot + 5, // index + 5, // fec_set_index + 6, // num_data_shreds + 6, // num_coding_shreds + 3, // position + 0, // version + ); let mut coding_shred = Shred::new_empty_from_header(common, DataShredHeader::default(), coding); Shredder::sign_shred(&leader_keypair, &mut coding_shred); @@ -954,7 +962,15 @@ mod test { std::net::{IpAddr, Ipv4Addr}, }; solana_logger::setup(); - let (common, coding) = Shredder::new_coding_shred_header(5, 5, 5, 6, 6, 0); + let (common, coding) = Shredder::new_coding_shred_header( + 5, // slot + 5, // index + 5, // fec_set_index + 6, // num_data_shreds + 6, // 
num_coding_shreds + 4, // position + 0, // version + ); let shred = Shred::new_empty_from_header(common, DataShredHeader::default(), coding); let mut shreds = vec![shred.clone(), shred.clone(), shred]; let _from_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); diff --git a/docs/sidebars.js b/docs/sidebars.js index 3eb4c75df9f74d..b8e03e7dbb1082 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -91,6 +91,7 @@ module.exports = { }, "developing/test-validator", "developing/backwards-compatibility", + "developing/plugins/accountsdb_plugin" ], Integrating: ["integrations/exchange"], Validating: [ diff --git a/docs/src/cli/deploy-a-program.md b/docs/src/cli/deploy-a-program.md index 10f03625ac20e2..50996e9e4ea55e 100644 --- a/docs/src/cli/deploy-a-program.md +++ b/docs/src/cli/deploy-a-program.md @@ -152,42 +152,47 @@ Then issue a new `deploy` command and specify the buffer: solana program deploy --buffer ``` -### Closing buffer accounts and reclaiming their lamports +### Closing program and buffer accounts, and reclaiming their lamports + +Both program and buffer accounts can be closed and their lamport balances +transferred to a recipient's account. If deployment fails there will be a left over buffer account that holds lamports. The buffer account can either be used to [resume a -deploy](#resuming-a-failed-deploy) or closed. When closed, the full balance of -the buffer account will be transferred to the recipient's account. +deploy](#resuming-a-failed-deploy) or closed. 
-The buffer account's authority must be present to close a buffer account, to -list all the open buffer accounts that match the default authority: +The program or buffer account's authority must be present to close an account, +to list all the open program or buffer accounts that match the default +authority: ```bash +solana program show --programs solana program show --buffers ``` To specify a different authority: ```bash +solana program show --programs --buffer-authority solana program show --buffers --buffer-authority ``` To close a single account: ```bash -solana program close +solana program close ``` To close a single account and specify a different authority than the default: ```bash -solana program close --buffer-authority +solana program close
--buffer-authority ``` To close a single account and specify a different recipient than the default: ```bash -solana program close --recipient +solana program close
--recipient ``` To close all the buffer accounts associated with the current authority: diff --git a/docs/src/cluster/overview.md b/docs/src/cluster/overview.md index fc73263ac34fa9..22f3acd00a056f 100644 --- a/docs/src/cluster/overview.md +++ b/docs/src/cluster/overview.md @@ -20,18 +20,15 @@ Clients send transactions to any validator's Transaction Processing Unit \(TPU\) ## Confirming Transactions -A Solana cluster is capable of subsecond _confirmation_ for up to 150 nodes with plans to scale up to hundreds of thousands of nodes. Once fully implemented, confirmation times are expected to increase only with the logarithm of the number of validators, where the logarithm's base is very high. If the base is one thousand, for example, it means that for the first thousand nodes, confirmation will be the duration of three network hops plus the time it takes the slowest validator of a supermajority to vote. For the next million nodes, confirmation increases by only one network hop. +A Solana cluster is capable of subsecond _confirmation_ for thousands of nodes with plans to scale up to hundreds of thousands of nodes. Confirmation times are expected to increase only with the logarithm of the number of validators, where the logarithm's base is very high. If the base is one thousand, for example, it means that for the first thousand nodes, confirmation will be the duration of three network hops plus the time it takes the slowest validator of a supermajority to vote. For the next million nodes, confirmation increases by only one network hop. Solana defines confirmation as the duration of time from when the leader timestamps a new entry to the moment when it recognizes a supermajority of ledger votes. -A gossip network is much too slow to achieve subsecond confirmation once the network grows beyond a certain size. The time it takes to send messages to all nodes is proportional to the square of the number of nodes. 
If a blockchain wants to achieve low confirmation and attempts to do it using a gossip network, it will be forced to centralize to just a handful of nodes. - Scalable confirmation can be achieved using the follow combination of techniques: 1. Timestamp transactions with a VDF sample and sign the timestamp. -2. Split the transactions into batches, send each to separate nodes and have - each node share its batch with its peers. +2. Split the transactions into batches, send each to separate nodes and have each node share its batch with its peers. 3. Repeat the previous step recursively until all nodes have all batches. @@ -39,4 +36,4 @@ Solana rotates leaders at fixed intervals, called _slots_. Each leader may only Next, transactions are broken into batches so that a node can send transactions to multiple parties without making multiple copies. If, for example, the leader needed to send 60 transactions to 6 nodes, it would break that collection of 60 into batches of 10 transactions and send one to each node. This allows the leader to put 60 transactions on the wire, not 60 transactions for each node. Each node then shares its batch with its peers. Once the node has collected all 6 batches, it reconstructs the original set of 60 transactions. -A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing, the approach is scaling well up to about 150 validators. To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propogation_](turbine-block-propagation.md). +A batch of transactions can only be split so many times before it is so small that header information becomes the primary consumer of network bandwidth. At the time of this writing (December, 2021), the approach is scaling well up to about 1,250 validators. 
To scale up to hundreds of thousands of validators, each node can apply the same technique as the leader node to another set of nodes of equal size. We call the technique [_Turbine Block Propogation_](turbine-block-propagation.md). diff --git a/docs/src/developing/clients/javascript-reference.md b/docs/src/developing/clients/javascript-reference.md index f378ddb7f69fd6..d798c234ac1020 100644 --- a/docs/src/developing/clients/javascript-reference.md +++ b/docs/src/developing/clients/javascript-reference.md @@ -428,7 +428,7 @@ await web3.sendAndConfirmTransaction(connection, transaction, [fromPublicKey]) [SourceDocumentation](https://solana-labs.github.io/solana-web3.js/classes/Struct.html) -The struct class is used to create Rust compatible structs in javascript. This class is only compatible with Borsch encoded Rust structs. +The struct class is used to create Rust compatible structs in javascript. This class is only compatible with Borsh encoded Rust structs. #### Example Usage @@ -455,7 +455,7 @@ export class Fee extends Struct { [Source Documentation](https://solana-labs.github.io/solana-web3.js/classes/Enum.html) -The Enum class is used to represent a Rust compatible Enum in javascript. The enum will just be a string representation if logged but can be properly encoded/decoded when used in conjunction with [Struct](javascript-api.md#Struct). This class is only compatible with Borsch encoded Rust enumerations. +The Enum class is used to represent a Rust compatible Enum in javascript. The enum will just be a string representation if logged but can be properly encoded/decoded when used in conjunction with [Struct](javascript-api.md#Struct). This class is only compatible with Borsh encoded Rust enumerations. 
#### Example Usage diff --git a/docs/src/developing/clients/jsonrpc-api.md b/docs/src/developing/clients/jsonrpc-api.md index cc12db378ce762..1f552b5c29ba68 100644 --- a/docs/src/developing/clients/jsonrpc-api.md +++ b/docs/src/developing/clients/jsonrpc-api.md @@ -85,6 +85,8 @@ gives a convenient interface for the RPC methods. Unstable methods may see breaking changes in patch releases and may not be supported in perpetuity. +- [blockSubscribe](jsonrpc-api.md#blocksubscribe---unstable-disabled-by-default) +- [blockUnsubscribe](jsonrpc-api.md#blockunsubscribe) - [slotsUpdatesSubscribe](jsonrpc-api.md#slotsupdatessubscribe---unstable) - [slotsUpdatesUnsubscribe](jsonrpc-api.md#slotsupdatesunsubscribe) - [voteSubscribe](jsonrpc-api.md#votesubscribe---unstable-disabled-by-default) @@ -363,9 +365,6 @@ Result: ### getBlock -**NEW: This method is only available in solana-core v1.7 or newer. Please use -[getConfirmedBlock](jsonrpc-api.md#getconfirmedblock) for solana-core v1.6** - Returns identity and transaction information about a confirmed block in the ledger #### Parameters: @@ -390,7 +389,7 @@ The result field will be an object with the following fields: - `transactions: ` - present if "full" transaction details are requested; an array of JSON objects containing: - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter - `meta: ` - transaction status metadata object, containing `null` or: - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) + - `err: ` - Error if transaction failed, null if transaction succeeded. 
[TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - `fee: ` - fee this transaction was charged, as u64 integer - `preBalances: ` - array of u64 account balances from before the transaction was processed - `postBalances: ` - array of u64 account balances after the transaction was processed @@ -590,6 +589,7 @@ The JSON structure of token balances is defined as a list of objects in the foll - `accountIndex: ` - Index of the account in which the token balance is provided for. - `mint: ` - Pubkey of the token's mint. +- `owner: ` - Pubkey of token balance's owner. - `uiTokenAmount: ` - - `amount: ` - Raw amount of tokens as a string, ignoring decimals. - `decimals: ` - Number of decimals configured for token's mint. @@ -768,9 +768,6 @@ Result: ### getBlocks -**NEW: This method is only available in solana-core v1.7 or newer. Please use -[getConfirmedBlocks](jsonrpc-api.md#getconfirmedblocks) for solana-core v1.6** - Returns a list of confirmed blocks between two slots #### Parameters: @@ -802,9 +799,6 @@ Result: ### getBlocksWithLimit -**NEW: This method is only available in solana-core v1.7 or newer. Please use -[getConfirmedBlocksWithLimit](jsonrpc-api.md#getconfirmedblockswithlimit) for solana-core v1.6** - Returns a list of confirmed blocks starting at the given slot #### Parameters: @@ -999,7 +993,7 @@ Result: ### getFeeForMessage **NEW: This method is only available in solana-core v1.9 or newer. Please use -[getFees](jsonrpc-api.md#getfees) for solana-core v1.7/v1.8** +[getFees](jsonrpc-api.md#getfees) for solana-core v1.8** Get the fee the network will charge for a particular Message @@ -1156,7 +1150,7 @@ Unhealthy Result (if additional information is available) ### getHighestSnapshotSlot **NEW: This method is only available in solana-core v1.9 or newer. 
Please use -[getSnapshotSlot](jsonrpc-api.md#getsnapshotslot) for solana-core v1.7/v1.8** +[getSnapshotSlot](jsonrpc-api.md#getsnapshotslot) for solana-core v1.8** Returns the highest slot information that the node has snapshots for. @@ -1464,6 +1458,9 @@ Result: ### getLatestBlockhash +**NEW: This method is only available in solana-core v1.9 or newer. Please use +[getRecentBlockhash](jsonrpc-api.md#getrecentblockhash) for solana-core v1.8** + Returns the latest blockhash #### Parameters: @@ -1976,12 +1973,10 @@ Result: ### getSignaturesForAddress -**NEW: This method is only available in solana-core v1.7 or newer. Please use -[getConfirmedSignaturesForAddress2](jsonrpc-api.md#getconfirmedsignaturesforaddress2) for solana-core v1.6** - -Returns confirmed signatures for transactions involving an -address backwards in time from the provided signature or most recent confirmed block +Returns signatures for confirmed transactions that include the given address in +their `accountKeys` list. Returns signatures backwards in time from the +provided signature or most recent confirmed block #### Parameters: * `` - account address as base-58 encoded string @@ -1998,7 +1993,7 @@ from newest to oldest transaction: * `` * `signature: ` - transaction signature as base-58 encoded string * `slot: ` - The slot that contains the block with the transaction - * `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) + * `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) * `memo: ` - Memo associated with the transaction, null if no memo is present * `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch) of when transaction was processed. null if not available. 
@@ -2062,7 +2057,7 @@ An array of: - `` - `slot: ` - The slot the transaction was processed - `confirmations: ` - Number of blocks since signature confirmation, null if rooted, as well as finalized by a supermajority of the cluster - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) + - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - `confirmationStatus: ` - The transaction's cluster confirmation status; either `processed`, `confirmed`, or `finalized`. See [Commitment](jsonrpc-api.md#configuring-state-commitment) for more on optimistic confirmation. - DEPRECATED: `status: ` - Transaction status - `"Ok": ` - Transaction was successful @@ -2727,9 +2722,6 @@ Result: ### getTransaction -**NEW: This method is only available in solana-core v1.7 or newer. Please use -[getConfirmedTransaction](jsonrpc-api.md#getconfirmedtransaction) for solana-core v1.6** - Returns transaction details for a confirmed transaction #### Parameters: @@ -2958,7 +2950,7 @@ curl http://localhost:8899 -X POST -H "Content-Type: application/json" -d ' Result: ```json -{"jsonrpc":"2.0","result":{"solana-core": "1.9.0"},"id":1} +{"jsonrpc":"2.0","result":{"solana-core": "1.9.4"},"id":1} ``` ### getVoteAccounts @@ -3074,6 +3066,9 @@ Result: ### isBlockhashValid +**NEW: This method is only available in solana-core v1.9 or newer. 
Please use +[getFeeCalculatorForBlockhash](jsonrpc-api.md#getfeecalculatorforblockhash) for solana-core v1.8** + Returns whether a blockhash is still valid or not #### Parameters: @@ -3259,7 +3254,7 @@ Simulate sending a transaction An RpcResponse containing a TransactionStatus object The result will be an RpcResponse JSON object with `value` set to a JSON object with the following fields: -- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) +- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - `logs: ` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure) - `accounts: | null>` - array of accounts with the same length as the `accounts.addresses` array in the request - `` - if the account doesn't exist or if `err` is not null @@ -3451,6 +3446,339 @@ Result: {"jsonrpc": "2.0","result": true,"id": 1} ``` +### blockSubscribe - Unstable, disabled by default + +**This subscription is unstable and only available if the validator was started +with the `--rpc-pubsub-enable-block-subscription` flag. The format of this +subscription may change in the future** + +Subscribe to receive notification anytime a new block is Confirmed or Finalized. + +#### Parameters: + +- `filter: |` - filter criteria for the logs to receive results by account type; currently supported: + - "all" - include all transactions in block + - `{ "mentionsAccountOrProgram": }` - return only transactions that mention the provided public key (as base-58 encoded string). If no mentions in a given block, then no notification will be sent. 
+- `` - (optional) Configuration object containing the following optional fields: + - (optional) [Commitment](jsonrpc-api.md#configuring-state-commitment) + - (optional) `encoding: ` - encoding for Account data, either "base58" (*slow*), "base64", "base64+zstd" or "jsonParsed". + "jsonParsed" encoding attempts to use program-specific state parsers to return more human-readable and explicit account state data. If "jsonParsed" is requested but a parser cannot be found, the field falls back to base64 encoding, detectable when the `data` field is type ``. Default is "base64". + - (optional) `transactionDetails: ` - level of transaction detail to return, either "full", "signatures", or "none". If parameter not provided, the default detail level is "full". + - (optional) `showRewards: bool` - whether to populate the `rewards` array. If parameter not provided, the default includes rewards. + +#### Results: + +- `integer` - subscription id \(needed to unsubscribe\) + +#### Example: + +Request: +```json +{"jsonrpc": "2.0", "id": "1", "method": "blockSubscribe", "params": ["all"]} +``` +```json +{ + "jsonrpc": "2.0", + "id": "1", + "method": "blockSubscribe", + "params": [ + {"mentionsAccountOrProgram": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op"}, + { + "commitment": "confirmed", + "encoding": "base64", + "showRewards": true, + "transactionDetails": "full" + } + ] +} +``` + +Result: +```json +{"jsonrpc": "2.0","result": 0,"id": 1} +``` + +#### Notification Format: + +The notification will be an object with the following fields: + +-`slot: ` - The corresponding slot. +- `err: ` - Error if something went wrong publishing the notification otherwise null. +- `block: ` - A block object as seen in the [getBlock](jsonrpc-api.md#getblock) RPC HTTP method. 
+ +```json +{ + "jsonrpc": "2.0", + "method": "blockNotification", + "params": { + "result": { + "context": { + "slot": 112301554 + }, + "value": { + "slot": 112301554, + "block": { + "previousBlockhash": "GJp125YAN4ufCSUvZJVdCyWQJ7RPWMmwxoyUQySydZA", + "blockhash": "6ojMHjctdqfB55JDpEpqfHnP96fiaHEcvzEQ2NNcxzHP", + "parentSlot": 112301553, + "transactions": [ + { + "transaction": [ + "OpltwoUvWxYi1P2U8vbIdE/aPntjYo5Aa0VQ2JJyeJE2g9Vvxk8dDGgFMruYfDu8/IfUWb0REppTe7IpAuuLRgIBAAkWnj4KHRpEWWW7gvO1c0BHy06wZi2g7/DLqpEtkRsThAXIdBbhXCLvltw50ZnjDx2hzw74NVn49kmpYj2VZHQJoeJoYJqaKcvuxCi/2i4yywedcVNDWkM84Iuw+cEn9/ROCrXY4qBFI9dveEERQ1c4kdU46xjxj9Vi+QXkb2Kx45QFVkG4Y7HHsoS6WNUiw2m4ffnMNnOVdF9tJht7oeuEfDMuUEaO7l9JeUxppCvrGk3CP45saO51gkwVYEgKzhpKjCx3rgsYxNR81fY4hnUQXSbbc2Y55FkwgRBpVvQK7/+clR4Gjhd3L4y+OtPl7QF93Akg1LaU9wRMs5nvfDFlggqI9PqJl+IvVWrNRdBbPS8LIIhcwbRTkSbqlJQWxYg3Bo2CTVbw7rt1ZubuHWWp0mD/UJpLXGm2JprWTePNULzHu67sfqaWF99LwmwjTyYEkqkRt1T0Je5VzHgJs0N5jY4iIU9K3lMqvrKOIn/2zEMZ+ol2gdgjshx+sphIyhw65F3J/Dbzk04LLkK+CULmN571Y+hFlXF2ke0BIuUG6AUF+4214Cu7FXnqo3rkxEHDZAk0lRrAJ8X/Z+iwuwI5cgbd9uHXZaGT2cvhRs7reawctIXtX1s3kTqM9YV+/wCpDLAp8axcEkaQkLDKRoWxqp8XLNZSKial7Rk+ELAVVKWoWLRXRZ+OIggu0OzMExvVLE5VHqy71FNHq4gGitkiKYNFWSLIE4qGfdFLZXy/6hwS+wq9ewjikCpd//C9BcCL7Wl0iQdUslxNVCBZHnCoPYih9JXvGefOb9WWnjGy14sG9j70+RSVx6BlkFELWwFvIlWR/tHn3EhHAuL0inS2pwX7ZQTAU6gDVaoqbR2EiJ47cKoPycBNvHLoKxoY9AZaBjPl6q8SKQJSFyFd9n44opAgI6zMTjYF/8Ok4VpXEESp3QaoUyTI9sOJ6oFP6f4dwnvQelgXS+AEfAsHsKXxGAIUDQENAgMEBQAGBwgIDg8IBJCER3QXl1AVDBADCQoOAAQLERITDAjb7ugh3gOuTy==", + "base64" + ], + "meta": { + "err": null, + "status": { + "Ok": null + }, + "fee": 5000, + "preBalances": [ + 1758510880, + 2067120, + 1566000, + 1461600, + 2039280, + 2039280, + 1900080, + 1865280, + 0, + 3680844220, + 2039280 + ], + "postBalances": [ + 1758505880, + 2067120, + 1566000, + 1461600, + 2039280, + 2039280, + 1900080, + 1865280, + 0, + 3680844220, + 2039280 + ], + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "programIdIndex": 
13, + "accounts": [ + 1, + 15, + 3, + 4, + 2, + 14 + ], + "data": "21TeLgZXNbtHXVBzCaiRmH" + }, + { + "programIdIndex": 14, + "accounts": [ + 3, + 4, + 1 + ], + "data": "6qfC8ic7Aq99" + }, + { + "programIdIndex": 13, + "accounts": [ + 1, + 15, + 3, + 5, + 2, + 14 + ], + "data": "21TeLgZXNbsn4QEpaSEr3q" + }, + { + "programIdIndex": 14, + "accounts": [ + 3, + 5, + 1 + ], + "data": "6LC7BYyxhFRh" + } + ] + }, + { + "index": 1, + "instructions": [ + { + "programIdIndex": 14, + "accounts": [ + 4, + 3, + 0 + ], + "data": "7aUiLHFjSVdZ" + }, + { + "programIdIndex": 19, + "accounts": [ + 17, + 18, + 16, + 9, + 11, + 12, + 14 + ], + "data": "8kvZyjATKQWYxaKR1qD53V" + }, + { + "programIdIndex": 14, + "accounts": [ + 9, + 11, + 18 + ], + "data": "6qfC8ic7Aq99" + } + ] + } + ], + "logMessages": [ + "Program QMNeHCGYnLVDn1icRAfQZpjPLBNkfGbSKRB83G5d8KB invoke [1]", + "Program QMWoBmAyJLAsA1Lh9ugMTw2gciTihncciphzdNzdZYV invoke [2]" + ], + "preTokenBalances": [ + { + "accountIndex": 4, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op" + }, + { + "accountIndex": 5, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": 11513.0679, + "decimals": 6, + "amount": "11513067900", + "uiAmountString": "11513.0679" + }, + "owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk" + }, + { + "accountIndex": 10, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU" + }, + { + "accountIndex": 11, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": 15138.514093, + "decimals": 6, + "amount": "15138514093", + "uiAmountString": "15138.514093" + }, + "owner": 
"LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op" + } + ], + "postTokenBalances": [ + { + "accountIndex": 4, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "LieKvPRE8XeX3Y2xVNHjKlpAScD12lYySBVQ4HqoJ5op" + }, + { + "accountIndex": 5, + "mint": "iouQcQBAiEXe6cKLS85zmZxUqaCqBdeHFpqKoSz615u", + "uiTokenAmount": { + "uiAmount": 11513.103028, + "decimals": 6, + "amount": "11513103028", + "uiAmountString": "11513.103028" + }, + "owner": "rXhAofQCT7NN9TUqigyEAUzV1uLL4boeD8CRkNBSkYk" + }, + { + "accountIndex": 10, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": null, + "decimals": 6, + "amount": "0", + "uiAmountString": "0" + }, + "owner": "CL9wkGFT3SZRRNa9dgaovuRV7jrVVigBUZ6DjcgySsCU" + }, + { + "accountIndex": 11, + "mint": "Saber2gLauYim4Mvftnrasomsv6NvAuncvMEZwcLpD1", + "uiTokenAmount": { + "uiAmount": 15489.767829, + "decimals": 6, + "amount": "15489767829", + "uiAmountString": "15489.767829" + }, + "owner": "BeiHVPRE8XeX3Y2xVNrSsTpAScH94nYySBVQ4HqgN9at" + } + ], + "rewards": [] + } + } + ], + "blockTime": 1639926816, + "blockHeight": 101210751 + }, + "err": null + } + }, + "subscription": 14 + } +} +``` + +### blockUnsubscribe + +Unsubscribe from block notifications + +#### Parameters: + +- `` - subscription id to cancel + +#### Results: + +- `` - unsubscribe success message + +#### Example: + +Request: +```json +{"jsonrpc":"2.0", "id":1, "method":"blockUnsubscribe", "params":[0]} +``` + +Response: +```json +{"jsonrpc": "2.0","result": true,"id": 1} +``` + ### logsSubscribe Subscribe to transaction logging @@ -3503,7 +3831,7 @@ Result: The notification will be an RpcResponse JSON object with value equal to: - `signature: ` - The transaction signature base58 encoded. -- `err: ` - Error if transaction failed, null if transaction succeeded. 
[TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) +- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - `logs: ` - Array of log messages the transaction instructions output during execution, null if simulation failed before the transaction was able to execute (for example due to an invalid blockhash or signature verification failure) Example: @@ -3761,7 +4089,7 @@ Result: #### Notification Format: The notification will be an RpcResponse JSON object with value containing an object with: -- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) +- `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) Example: ```json @@ -4140,7 +4468,7 @@ The result field will be an object with the following fields: - `transactions: ` - present if "full" transaction details are requested; an array of JSON objects containing: - `transaction: ` - [Transaction](#transaction-structure) object, either in JSON format or encoded binary data, depending on encoding parameter - `meta: ` - transaction status metadata object, containing `null` or: - - `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) + - `err: ` - Error if transaction failed, null if transaction succeeded. 
[TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) - `fee: ` - fee this transaction was charged, as u64 integer - `preBalances: ` - array of u64 account balances from before the transaction was processed - `postBalances: ` - array of u64 account balances after the transaction was processed @@ -4376,8 +4704,10 @@ Result: **DEPRECATED: Please use [getSignaturesForAddress](jsonrpc-api.md#getsignaturesforaddress) instead** This method is expected to be removed in solana-core v2.0 -Returns confirmed signatures for transactions involving an -address backwards in time from the provided signature or most recent confirmed block +Returns signatures for confirmed transactions that include the given address in +their `accountKeys` list. Returns signatures backwards in time from the +provided signature or most recent confirmed block + #### Parameters: * `` - account address as base-58 encoded string @@ -4394,7 +4724,7 @@ from newest to oldest transaction: * `` * `signature: ` - transaction signature as base-58 encoded string * `slot: ` - The slot that contains the block with the transaction - * `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/master/sdk/src/transaction.rs#L24) + * `err: ` - Error if transaction failed, null if transaction succeeded. [TransactionError definitions](https://github.com/solana-labs/solana/blob/c0c60386544ec9a9ec7119229f37386d9f070523/sdk/src/transaction/error.rs#L13) * `memo: ` - Memo associated with the transaction, null if no memo is present * `blockTime: ` - estimated production time, as Unix timestamp (seconds since the Unix epoch) of when transaction was processed. null if not available. 
diff --git a/docs/src/developing/clients/rust-api.md b/docs/src/developing/clients/rust-api.md index 8ec1eea88250fd..6deaf81443acdb 100644 --- a/docs/src/developing/clients/rust-api.md +++ b/docs/src/developing/clients/rust-api.md @@ -21,11 +21,16 @@ Some important crates: - [`solana-client`] — For interacting with a Solana node via the [JSON RPC API](jsonrpc-api). +- [`solana-cli-config`] — Loading and saving the Solana CLI configuration + file. + - [`solana-clap-utils`] — Routines for setting up a CLI, using [`clap`], - as used by the main Solana CLI. + as used by the main Solana CLI. Includes functions for loading all types of + signers supported by the CLI. [`solana-program`]: https://docs.rs/solana-program [`solana-sdk`]: https://docs.rs/solana-sdk [`solana-client`]: https://docs.rs/solana-client +[`solana-cli-config`]: https://docs.rs/solana-cli-config [`solana-clap-utils`]: https://docs.rs/solana-clap-utils [`clap`]: https://docs.rs/clap diff --git a/docs/src/developing/on-chain-programs/developing-c.md b/docs/src/developing/on-chain-programs/developing-c.md index b48509cc340502..d82877c8acdc29 100644 --- a/docs/src/developing/on-chain-programs/developing-c.md +++ b/docs/src/developing/on-chain-programs/developing-c.md @@ -46,7 +46,7 @@ make -C Solana uses the [Criterion](https://github.com/Snaipe/Criterion) test framework and tests are executed each time the program is built [How to -Build](#how-to-build)]. +Build](#how-to-build). To add tests, create a new file next to your source file named `test_.c` and populate it with criterion test cases. For an example see the [helloworld C @@ -65,7 +65,7 @@ see the [overview](overview#loaders). 
Currently there are two supported loaders [BPF Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17) and [BPF loader -deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14) +deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14). They both have the same raw entrypoint definition, the following is the raw symbol that the runtime looks up and calls: diff --git a/docs/src/developing/on-chain-programs/developing-rust.md b/docs/src/developing/on-chain-programs/developing-rust.md index b638ff18fccfe3..0d8930896d3a8b 100644 --- a/docs/src/developing/on-chain-programs/developing-rust.md +++ b/docs/src/developing/on-chain-programs/developing-rust.md @@ -16,19 +16,6 @@ layout](https://doc.rust-lang.org/cargo/guide/project-layout.html): /Cargo.toml ``` -But must also include: - -``` -/Xargo.toml -``` - -Which must contain: - -``` -[target.bpfel-unknown-unknown.dependencies.std] -features = [] -``` - Solana Rust programs may depend directly on each other in order to gain access to instruction helpers when making [cross-program invocations](developing/programming-model/calling-between-programs.md#cross-program-invocations). When doing so it's important to not pull in the dependent program's entrypoint diff --git a/docs/src/developing/plugins/accountsdb_plugin.md b/docs/src/developing/plugins/accountsdb_plugin.md new file mode 100644 index 00000000000000..0a5e51d36c56c4 --- /dev/null +++ b/docs/src/developing/plugins/accountsdb_plugin.md @@ -0,0 +1,449 @@ +--- +title: Plugins +--- + +## Overview + +Validators under heavy RPC loads, such as when serving getProgramAccounts calls, +can fall behind the network. 
To solve this problem, the validator has been +enhanced to support a plugin mechanism through which the information about +accounts and slots can be transmitted to external data stores such as relational +databases, NoSQL databases or Kafka. RPC services then can be developed to +consume data from these external data stores with the possibility of more +flexible and targeted optimizations such as caching and indexing. This allows +the validator to focus on processing transactions without being slowed down by +busy RPC requests. + +This document describes the interfaces of the plugin and the referential plugin +implementation for the PostgreSQL database. + +[crates.io]: https://crates.io/search?q=solana- +[docs.rs]: https://docs.rs/releases/search?query=solana- + +### Important Crates: + +- [`solana-accountsdb-plugin-interface`] — This crate defines the plugin +interfaces. + +- [`solana-accountsdb-plugin-postgres`] — The crate for the referential +plugin implementation for the PostgreSQL database. + +[`solana-accountsdb-plugin-interface`]: https://docs.rs/solana-accountsdb-plugin-interface +[`solana-accountsdb-plugin-postgres`]: https://docs.rs/solana-accountsdb-plugin-postgres +[`solana-sdk`]: https://docs.rs/solana-sdk +[`solana-transaction-status`]: https://docs.rs/solana-transaction-status + +## The Plugin Interface + +The Plugin interface is declared in [`solana-accountsdb-plugin-interface`]. It +is defined by the trait `AccountsDbPlugin`. The plugin should implement the +trait and expose a "C" function `_create_plugin` to return the pointer to this +trait. For example, in the referential implementation, the following code +instantiates the PostgreSQL plugin `AccountsDbPluginPostgres ` and returns its +pointer. + +``` +#[no_mangle] +#[allow(improper_ctypes_definitions)] +/// # Safety +/// +/// This function returns the AccountsDbPluginPostgres pointer as trait AccountsDbPlugin. 
+pub unsafe extern "C" fn _create_plugin() -> *mut dyn AccountsDbPlugin { + let plugin = AccountsDbPluginPostgres::new(); + let plugin: Box = Box::new(plugin); + Box::into_raw(plugin) +} +``` + +A plugin implementation can implement the `on_load` method to initialize itself. +This function is invoked after a plugin is dynamically loaded into the validator +when it starts. The configuration of the plugin is controlled by a configuration +file in JSON format. The JSON file must have a field `libpath` that points +to the full path name of the shared library implementing the plugin, and may +have other configuration information, like connection parameters for the external +database. The plugin configuration file is specified by the validator's CLI +parameter `--accountsdb-plugin-config` and the file must be readable to the +validator process. + +Please see the [config file](#config) for the referential +PostgreSQL plugin below for an example. + +The plugin can implement the `on_unload` method to do any cleanup before the +plugin is unloaded when the validator is gracefully shutdown. + +The plugin framework supports streaming either accounts, transactions or both. +A plugin uses the following function to indicate if it is interested in receiving +account data: + +``` +fn account_data_notifications_enabled(&self) -> bool +``` + +And it uses the following function to indicate if it is interested in receiving +transaction data: + +``` + fn transaction_notifications_enabled(&self) -> bool +``` + +The following method is used for notifying on an account update: + +``` + fn update_account( + &mut self, + account: ReplicaAccountInfoVersions, + slot: u64, + is_startup: bool, + ) -> Result<()> +``` + +The `ReplicaAccountInfoVersions` struct contains the metadata and data of the account +streamed. The `slot` points to the slot the account is being updated at. When +`is_startup` is true, it indicates the account is loaded from snapshots when +the validator starts up. 
When `is_startup` is false, the account is updated +when processing a transaction. + + +The following method is called when all accounts have been notified when the +validator restores the AccountsDb from snapshots at startup. + +``` +fn notify_end_of_startup(&mut self) -> Result<()> +``` + +When `update_account` is called during processing transactions, the plugin +should process the notification as fast as possible because any delay may +cause the validator to fall behind the network. Persistence to external data +store is best to be done asynchronously. + +The following method is used for notifying slot status changes: + +``` + fn update_slot_status( + &mut self, + slot: u64, + parent: Option, + status: SlotStatus, + ) -> Result<()> +``` + +To ensure data consistency, the plugin implementation can choose to abort +the validator in case of error persisting to external stores. When the +validator restarts the account data will be re-transmitted. + +The following method is used for notifying transactions: + +``` + fn notify_transaction( + &mut self, + transaction: ReplicaTransactionInfoVersions, + slot: u64, + ) -> Result<()> +``` + +The `ReplicaTransactionInfoVersions` struct +contains the information about a streamed transaction. It wraps `ReplicaTransactionInfo` + +``` +pub struct ReplicaTransactionInfo<'a> { + /// The first signature of the transaction, used for identifying the transaction. + pub signature: &'a Signature, + + /// Indicates if the transaction is a simple vote transaction. + pub is_vote: bool, + + /// The sanitized transaction. + pub transaction: &'a SanitizedTransaction, + + /// Metadata of the transaction status. + pub transaction_status_meta: &'a TransactionStatusMeta, +} +``` +For details of `SanitizedTransaction` and `TransactionStatusMeta`, +please refer to [`solana-sdk`] and [`solana-transaction-status`] + +The `slot` points to the slot the transaction is executed at. 
+For more details, please refer to the Rust documentation in +[`solana-accountsdb-plugin-interface`]. + +## Example PostgreSQL Plugin + +The [`solana-accountsdb-plugin-postgres`] crate implements a plugin storing +account data to a PostgreSQL database to illustrate how a plugin can be +developed. + + +### Configuration File Format + + +The plugin is configured using the input configuration file. An example +configuration file looks like the following: + + +``` +{ + "libpath": "/solana/target/release/libsolana_accountsdb_plugin_postgres.so", + "host": "postgres-server", + "user": "solana", + "port": 5433, + "threads": 20, + "batch_size": 20, + "panic_on_db_errors": true, + "accounts_selector" : { + "accounts" : ["*"] + } +} +``` + +The `host`, `user`, and `port` control the PostgreSQL configuration +information. For more advanced connection options, please use the +`connection_str` field. Please see [Rust postgres configuration] +(https://docs.rs/postgres/0.19.2/postgres/config/struct.Config.html). + +To improve the throughput to the database, the plugin supports connection pooling +using multiple threads, each maintaining a connection to the PostgreSQL database. +The count of the threads is controlled by the `threads` field. A higher thread +count usually offers better performance. + +To further improve performance when saving large numbers of accounts at +startup, the plugin uses bulk inserts. The batch size is controlled by the +`batch_size` parameter. This can help reduce the round trips to the database. + +The `panic_on_db_errors` can be used to panic the validator in case of database +errors to ensure data consistency. + +### Account Selection + +The `accounts_selector` can be used to filter the accounts that should be persisted. 
+ +For example, one can use the following to persist only the accounts with particular +Base58-encoded Pubkeys, + +``` + "accounts_selector" : { + "accounts" : ["pubkey-1", "pubkey-2", ..., "pubkey-n"], + } +``` + +Or use the following to select accounts with certain program owners: + +``` + "accounts_selector" : { + "owners" : ["pubkey-owner-1", "pubkey-owner-2", ..., "pubkey-owner-m"], + } +``` + +To select all accounts, use the wildcard character (*): + +``` + "accounts_selector" : { + "accounts" : ["*"], + } +``` + +### Transaction Selection + +`transaction_selector` controls if and what transactions to store. +If this field is missing, none of the transactions are stored. + +For example, one can use the following to select only the transactions +referencing accounts with particular Base58-encoded Pubkeys, + +``` +"transaction_selector" : { + "mentions" : ["pubkey-1", "pubkey-2", ..., "pubkey-n"], +} +``` + +The `mentions` field supports wildcards to select all transactions or +all 'vote' transactions. For example, to select all transactions: + +``` +"transaction_selector" : { + "mentions" : ["*"], +} +``` + +To select all vote transactions: + +``` +"transaction_selector" : { + "mentions" : ["all_votes"], +} +``` + +### Database Setup + +#### Install PostgreSQL Server + +Please follow [PostgreSQL Ubuntu Installation](https://www.postgresql.org/download/linux/ubuntu/) +for instructions on installing the PostgreSQL database server. For example, to +install postgresql-14, + +``` +sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' +wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - +sudo apt-get update +sudo apt-get -y install postgresql-14 +``` +#### Control the Database Access + +Modify the pg_hba.conf as necessary to grant the plugin access to the database. 
+For example, in /etc/postgresql/14/main/pg_hba.conf, the following entry allows +nodes with IPs in the CIDR 10.138.0.0/24 to access all databases. The validator +runs in a node with an ip in the specified range. + +``` +host all all 10.138.0.0/24 trust +``` + +It is recommended to run the database server on a separate node from the validator for +better performance. + +#### Configure the Database Performance Parameters + +Please refer to the [PostgreSQL Server Configuration](https://www.postgresql.org/docs/14/runtime-config.html) +for configuration details. The referential implementation uses the following +configurations for better database performance in the /etc/postgresql/14/main/postgresql.conf +which are different from the default postgresql-14 installation. + +``` +max_connections = 200 # (change requires restart) +shared_buffers = 1GB # min 128kB +effective_io_concurrency = 1000 # 1-1000; 0 disables prefetching +wal_level = minimal # minimal, replica, or logical +fsync = off # flush data to disk for crash safety +synchronous_commit = off # synchronization level; +full_page_writes = off # recover from partial page writes +max_wal_senders = 0 # max number of walsender processes +``` + +The sample [postgresql.conf](https://github.com/solana-labs/solana/blob/7ac43b16d2c766df61ae0a06d7aaf14ba61996ac/accountsdb-plugin-postgres/scripts/postgresql.conf) +can be used for reference. + +#### Create the Database Instance and the Role + +Start the server: + +``` +sudo systemctl start postgresql@14-main +``` + +Create the database. For example, the following creates a database named 'solana': + +``` +sudo -u postgres createdb solana -p 5433 +``` + +Create the database user. For example, the following creates a regular user named 'solana': + +``` +sudo -u postgres createuser -p 5433 solana +``` + +Verify the database is working using psql. 
For example, assuming the node running +PostgreSQL has the ip 10.138.0.9, the following command will land in a shell where +SQL commands can be entered: + +``` +psql -U solana -p 5433 -h 10.138.0.9 -w -d solana +``` + +#### Create the Schema Objects + +Use the [create_schema.sql](https://github.com/solana-labs/solana/blob/a70eb098f4ae9cd359c1e40bbb7752b3dd61de8d/accountsdb-plugin-postgres/scripts/create_schema.sql) +to create the objects for storing accounts and slots. + +Download the script from github: + +``` +wget https://raw.githubusercontent.com/solana-labs/solana/a70eb098f4ae9cd359c1e40bbb7752b3dd61de8d/accountsdb-plugin-postgres/scripts/create_schema.sql +``` + +Then run the script: + +``` +psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f create_schema.sql +``` + +After this, start the validator with the plugin by using the `--accountsdb-plugin-config` +argument mentioned above. + +#### Destroy the Schema Objects + +To destroy the database objects, created by `create_schema.sql`, use +[drop_schema.sql](https://github.com/solana-labs/solana/blob/a70eb098f4ae9cd359c1e40bbb7752b3dd61de8d/accountsdb-plugin-postgres/scripts/drop_schema.sql). +For example, + +``` +psql -U solana -p 5433 -h 10.138.0.9 -w -d solana -f drop_schema.sql +``` + +### Capture Historical Account Data + +To capture account historical data, in the configuration file, turn +`store_account_historical_data` to true. 
+ +And ensure the database trigger is created to save data in the `audit_table` when +records in `account` are updated, as shown in `create_schema.sql`, + +``` +CREATE FUNCTION audit_account_update() RETURNS trigger AS $audit_account_update$ + BEGIN + INSERT INTO account_audit (pubkey, owner, lamports, slot, executable, rent_epoch, data, write_version, updated_on) + VALUES (OLD.pubkey, OLD.owner, OLD.lamports, OLD.slot, + OLD.executable, OLD.rent_epoch, OLD.data, OLD.write_version, OLD.updated_on); + RETURN NEW; + END; + +$audit_account_update$ LANGUAGE plpgsql; + +CREATE TRIGGER account_update_trigger AFTER UPDATE OR DELETE ON account + FOR EACH ROW EXECUTE PROCEDURE audit_account_update(); +``` + +The trigger can be dropped to disable this feature, for example, + +``` +DROP TRIGGER account_update_trigger ON account; +``` + +Over time, the account_audit can accumulate large amount of data. You may choose to +limit that by deleting older historical data. + +For example, the following SQL statement can be used to keep up to 1000 of the most +recent records for an account: + +``` +delete from account_audit a2 where (pubkey, write_version) in + (select pubkey, write_version from + (select a.pubkey, a.updated_on, a.slot, a.write_version, a.lamports, + rank() OVER ( partition by pubkey order by write_version desc) as rnk + from account_audit a) ranked + where ranked.rnk > 1000) +``` + +### Main Tables + +The following are the tables in the Postgres database + +| Table | Description | +|:--------------|:------------------------| +| account | Account data | +| slot | Slot metadata | +| transaction | Transaction data | +| account_audit | Account historical data | + + +### Performance Considerations + +When a validator lacks sufficient compute power, the overhead of saving the +account data can cause it to fall behind the network especially when all +accounts or a large number of accounts are selected. 
The node hosting the +PostgreSQL database needs to be powerful enough to handle the database loads +as well. It has been found using GCP n2-standard-64 machine type for the +validator and n2-highmem-32 for the PostgreSQL node is adequate for handling +transmitting all accounts while keeping up with the network. In addition, it is +best to keep the validator and the PostgreSQL in the same local network to +reduce latency. You may need to size the validator and database nodes +differently if serving other loads. diff --git a/docs/src/developing/programming-model/calling-between-programs.md b/docs/src/developing/programming-model/calling-between-programs.md index 96a41764b48ae4..9de6b530996757 100644 --- a/docs/src/developing/programming-model/calling-between-programs.md +++ b/docs/src/developing/programming-model/calling-between-programs.md @@ -57,9 +57,7 @@ given instruction to the `token` program via the instruction's `program_id` field. Note that `invoke` requires the caller to pass all the accounts required by the -instruction being invoked. This means that both the executable account (the -ones that matches the instruction's program id) and the accounts passed to the -instruction processor. +instruction being invoked, except for the executable account (the `program_id`). Before invoking `pay()`, the runtime must ensure that `acme` didn't modify any accounts owned by `token`. It does this by applying the runtime's policy to the diff --git a/docs/src/developing/runtime-facilities/programs.md b/docs/src/developing/runtime-facilities/programs.md index 17a9d1205b0347..63d818dd5c84b7 100644 --- a/docs/src/developing/runtime-facilities/programs.md +++ b/docs/src/developing/runtime-facilities/programs.md @@ -18,7 +18,7 @@ programs, as well include instructions from on-chain programs. ## System Program Create new accounts, allocate account data, assign accounts to owning programs, -transfer lamports from System Program owned accounts and pay transacation fees. 
+transfer lamports from System Program owned accounts and pay transaction fees. - Program id: `11111111111111111111111111111111` - Instructions: [SystemInstruction](https://docs.rs/solana-sdk/VERSION_FOR_DOCS_RS/solana_sdk/system_instruction/enum.SystemInstruction.html) diff --git a/docs/src/developing/test-validator.md b/docs/src/developing/test-validator.md index b6c40c7eca68c7..5585dd2602ba62 100644 --- a/docs/src/developing/test-validator.md +++ b/docs/src/developing/test-validator.md @@ -14,6 +14,7 @@ starts a full-featured, single-node cluster on the developer's workstation. - Direct [on-chain program](on-chain-programs/overview) deployment (`--bpf-program ...`) - Clone accounts from a public cluster, including programs (`--clone ...`) +- Load accounts from files - Configurable transaction history retention (`--limit-ledger-size ...`) - Configurable epoch length (`--slots-per-epoch ...`) - Jump to an arbitrary slot (`--warp-slot ...`) diff --git a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md index 9ee3e64ce50e34..8f3fb5c03a3ac0 100644 --- a/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md +++ b/docs/src/implemented-proposals/ed_overview/ed_validation_client_economics/ed_vce_state_validation_protocol_based_rewards.md @@ -39,7 +39,9 @@ From these simulated _Inflation Schedules_, we can also project ranges for token Finally we can estimate the _Staked Yield_ on staked SOL, if we introduce an additional parameter, previously discussed, _% of Staked SOL_: -%~\text{SOL Staked} = \frac{\text{Total SOL Staked}}{\text{Total Current Supply}} +$$ +\%~\text{SOL Staked} = \frac{\text{Total SOL Staked}}{\text{Total Current Supply}} +$$ In this case, because _% of Staked SOL_ is a parameter that 
must be estimated (unlike the _Inflation Schedule_ parameters), it is easier to use specific _Inflation Schedule_ parameters and explore a range of _% of Staked SOL_. For the below example, we’ve chosen the middle of the parameter ranges explored above: diff --git a/docs/src/implemented-proposals/staking-rewards.md b/docs/src/implemented-proposals/staking-rewards.md index 2d23555b1daf45..91a5ec9a0ee981 100644 --- a/docs/src/implemented-proposals/staking-rewards.md +++ b/docs/src/implemented-proposals/staking-rewards.md @@ -4,17 +4,11 @@ title: Staking Rewards A Proof of Stake \(PoS\), \(i.e. using in-protocol asset, SOL, to provide secure consensus\) design is outlined here. Solana implements a proof of stake reward/security scheme for validator nodes in the cluster. The purpose is threefold: -- Align validator incentives with that of the greater cluster through +- Align validator incentives with that of the greater cluster through skin-in-the-game deposits at risk - skin-in-the-game deposits at risk +- Avoid 'nothing at stake' fork voting issues by implementing slashing rules aimed at promoting fork convergence -- Avoid 'nothing at stake' fork voting issues by implementing slashing rules - - aimed at promoting fork convergence - -- Provide an avenue for validator rewards provided as a function of validator - - participation in the cluster. +- Provide an avenue for validator rewards provided as a function of validator participation in the cluster. While many of the details of the specific implementation are currently under consideration and are expected to come into focus through specific modeling studies and parameter exploration on the Solana testnet, we outline here our current thinking on the main components of the PoS system. Much of this thinking is based on the current status of Casper FFG, with optimizations and specific attributes to be modified as is allowed by Solana's Proof of History \(PoH\) blockchain data structure. 
@@ -24,29 +18,11 @@ Solana's ledger validation design is based on a rotating, stake-weighted selecte To become a Solana validator, one must deposit/lock-up some amount of SOL in a contract. This SOL will not be accessible for a specific time period. The precise duration of the staking lockup period has not been determined. However we can consider three phases of this time for which specific parameters will be necessary: -- _Warm-up period_: which SOL is deposited and inaccessible to the node, - - however PoH transaction validation has not begun. Most likely on the order of - - days to weeks - -- _Validation period_: a minimum duration for which the deposited SOL will be - - inaccessible, at risk of slashing \(see slashing rules below\) and earning - - rewards for the validator participation. Likely duration of months to a - - year. - -- _Cool-down period_: a duration of time following the submission of a - - 'withdrawal' transaction. During this period validation responsibilities have - - been removed and the funds continue to be inaccessible. Accumulated rewards +- _Warm-up period_: which SOL is deposited and inaccessible to the node, however PoH transaction validation has not begun. Most likely on the order of days to weeks - should be delivered at the end of this period, along with the return of the +- _Validation period_: a minimum duration for which the deposited SOL will be inaccessible, at risk of slashing \(see slashing rules below\) and earning rewards for the validator participation. Likely duration of months to a year. - initial deposit. +- _Cool-down period_: a duration of time following the submission of a 'withdrawal' transaction. During this period validation responsibilities have been removed and the funds continue to be inaccessible. Accumulated rewards should be delivered at the end of this period, along with the return of the initial deposit. 
Solana's trustless sense of time and ordering provided by its PoH data structure, along with its [turbine](https://www.youtube.com/watch?v=qt_gDRXHrHQ&t=1s) data broadcast and transmission design, should provide sub-second transaction confirmation times that scale with the log of the number of nodes in the cluster. This means we shouldn't have to restrict the number of validating nodes with a prohibitive 'minimum deposits' and expect nodes to be able to become validators with nominal amounts of SOL staked. At the same time, Solana's focus on high-throughput should create incentive for validation clients to provide high-performant and reliable hardware. Combined with potential a minimum network speed threshold to join as a validation-client, we expect a healthy validation delegation market to emerge. diff --git a/docs/src/offline-signing.md b/docs/src/offline-signing.md index 1b4075552d080a..afdf2651ef72f3 100644 --- a/docs/src/offline-signing.md +++ b/docs/src/offline-signing.md @@ -19,14 +19,26 @@ transaction. 
At present, the following commands support offline signing: - [`create-stake-account`](cli/usage.md#solana-create-stake-account) +- [`create-stake-account-checked`](cli/usage.md#solana-create-stake-account-checked) - [`deactivate-stake`](cli/usage.md#solana-deactivate-stake) - [`delegate-stake`](cli/usage.md#solana-delegate-stake) - [`split-stake`](cli/usage.md#solana-split-stake) - [`stake-authorize`](cli/usage.md#solana-stake-authorize) +- [`stake-authorize-checked`](cli/usage.md#solana-stake-authorize-checked) - [`stake-set-lockup`](cli/usage.md#solana-stake-set-lockup) +- [`stake-set-lockup-checked`](cli/usage.md#solana-stake-set-lockup-checked) - [`transfer`](cli/usage.md#solana-transfer) - [`withdraw-stake`](cli/usage.md#solana-withdraw-stake) +- [`create-vote-account`](cli/usage.md#solana-create-vote-account) +- [`vote-authorize-voter`](cli/usage.md#solana-vote-authorize-voter) +- [`vote-authorize-voter-checked`](cli/usage.md#solana-vote-authorize-voter-checked) +- [`vote-authorize-withdrawer`](cli/usage.md#solana-vote-authorize-withdrawer) +- [`vote-authorize-withdrawer-checked`](cli/usage.md#solana-vote-authorize-withdrawer-checked) +- [`vote-update-commission`](cli/usage.md#solana-vote-update-commission) +- [`vote-update-validator`](cli/usage.md#solana-vote-update-validator) +- [`withdraw-from-vote-account`](cli/usage.md#solana-withdraw-from-vote-account) + ## Signing Transactions Offline To sign a transaction offline, pass the following arguments on the command line diff --git a/docs/src/proposals/transactions-v2.md b/docs/src/proposals/transactions-v2.md index 6e4dfa930a5d8e..c003a3161212b8 100644 --- a/docs/src/proposals/transactions-v2.md +++ b/docs/src/proposals/transactions-v2.md @@ -1,4 +1,4 @@ -# Transactions v2 - Address maps +# Transactions v2 - On-chain Address Lookup Tables ## Problem @@ -24,76 +24,100 @@ after accounting for signatures and other transaction metadata. 
## Proposed Solution -Introduce a new on-chain program which stores account address maps and add a new -transaction format which supports concise account references through the -on-chain address maps. +1) Introduce a new program which manages on-chain address lookup tables +2) Add a new transaction format which can make use of on-chain +address lookup tables to efficiently load more accounts in a single transaction. -### Address Map Program +### Address Lookup Table Program Here we describe a program-based solution to the problem, whereby a protocol developer or end-user can create collections of related addresses on-chain for -concise use in a transaction's account inputs. This approach is similar to page -tables used in operating systems to succinctly map virtual addresses to physical -memory. +concise use in a transaction's account inputs. -After addresses are stored on-chain in an address map account, they may be +After addresses are stored on-chain in an address lookup table account, they may be succinctly referenced in a transaction using a 1-byte u8 index rather than a full 32-byte address. This will require a new transaction format to make use of these succinct references as well as runtime handling for looking up and loading -accounts from the on-chain mappings. +addresses from the on-chain lookup tables. #### State -Address map accounts must be rent-exempt but may be closed with a one epoch -deactivation period. Address maps must be activated before use. +Address lookup tables must be rent-exempt when initialized and after +each time new addresses are appended. Lookup tables can either be extended +from an on-chain buffered list of addresses or directly by appending +addresses through instruction data. Newly appended addresses require +one slot to warmup before being available to transactions for lookups. -Since transactions use a u8 offset to look up mapped addresses, accounts can -store up to 2^8 addresses each. 
Anyone may create an address map account of any -size as long as its big enough to store the necessary metadata. In addition to -stored addresses, address map accounts must also track the latest count of -stored addresses and an authority which must be a present signer for all -appended map entries. +Since transactions use a `u8` index to look up addresses, address tables can +store up to 256 addresses each. In addition to stored addresses, address table +accounts also tracks various metadata explained below. -Map additions require one slot to activate so each map should track how many -addresses are still pending activation in their on-chain state: +```rust +/// The maximum number of addresses that a lookup table can hold +pub const LOOKUP_TABLE_MAX_ADDRESSES: usize = 256; + +/// The serialized size of lookup table metadata +pub const LOOKUP_TABLE_META_SIZE: usize = 56; + +pub struct LookupTableMeta { + /// Lookup tables cannot be closed until the deactivation slot is + /// no longer "recent" (not accessible in the `SlotHashes` sysvar). + pub deactivation_slot: Slot, + /// The slot that the table was last extended. Address tables may + /// only be used to lookup addresses that were extended before + /// the current bank's slot. + pub last_extended_slot: Slot, + /// The start index where the table was last extended from during + /// the `last_extended_slot`. + pub last_extended_slot_start_index: u8, + /// Authority address which must sign for each modification. + pub authority: Option, + // Padding to keep addresses 8-byte aligned + pub _padding: u16, + // Raw list of addresses follows this serialized structure in + // the account's data, starting from `LOOKUP_TABLE_META_SIZE`. +} +``` + +To make it easier for address lookup tables to be updated by multi-sig or +governance-controlled authorities, addresses can be buffered on-chain in +a buffer account. Buffer accounts can be used to extend a lookup table +with many addresses in a single small transaction. 
```rust -struct AddressMap { - // authority must sign for each addition and to close the map account - authority: Pubkey, - // record a deactivation epoch to help validators know when to remove - // the map from their caches. - deactivation_epoch: Epoch, - // entries may not be modified once activated - activated: bool, - // list of entries, max capacity of u8::MAX - entries: Vec, +pub struct BufferMeta { + /// Authority address which must sign for each modification. + pub authority: Pubkey, + + // Serialized list of stored addresses follows the above metadata. } ``` #### Cleanup -Once an address map gets stale and is no longer used, it can be reclaimed by the -authority withdrawing lamports but the remaining balance must be greater than -two epochs of rent. This ensures that it takes at least one full epoch to -deactivate a map. +Once an address lookup table is no longer needed, it can be deactivated and closed +to have its rent balance reclaimed. Address lookup tables may not be recreated +at the same address because each new lookup table must be initialized at an address +derived from a recent slot. -Maps may not be recreated because each new map must be created at a derived -address using a monotonically increasing counter as a derivation seed. +Address lookup tables can be deactivated at any time but can continue to be used +by transactions until the deactivation slot is no longer present in the slot hashes +sysvar. This cool-down period ensures that in-flight transactions cannot be +censored and that address lookup tables cannot be closed and recreated for the same +slot. #### Cost -Since address map accounts require caching and special handling in the runtime, -they should incur higher costs for storage. Cost structure design will be added -later. +Since address lookups require extra overhead during transaction processing, +they should incur higher costs for a transaction. 
### Versioned Transactions -In order to allow accounts to be referenced more succinctly, the structure of -serialized transactions must be modified. The new transaction format should not -affect transaction processing in the Solana VM beyond the increased capacity for -accounts and program invocations. Invoked programs will be unaware of which -transaction format was used. +In order to support address table lookups, the structure of serialized +transactions must be modified. The new transaction format should not +affect transaction processing in the Solana program runtime beyond the +increased capacity for accounts and program invocations. Invoked +programs will be unaware of which transaction format was used. The new transaction format must be distinguished from the current transaction format. Current transactions can fit at most 19 signatures (64-bytes each) but @@ -112,22 +136,18 @@ pub struct Transaction { pub message: Message, } -// Uses custom serialization. If the first bit is set, a versioned message is -// encoded starting from the next byte. If the first bit is not set, all bytes -// are used to encode the original unversioned `Message` format. -pub enum Message { - Unversioned(UnversionedMessage), - Versioned(VersionedMessage), -} - -// use bincode varint encoding to use u8 instead of u32 for enum tags -#[derive(Serialize, Deserialize)] +// Uses custom serialization. If the first bit is set, the remaining bits +// in the first byte will encode a version number. If the first bit is not +// set, the first byte will be treated as the first byte of an encoded +// legacy message. 
pub enum VersionedMessage { - Current(Box) + Legacy(Message), + V0(v0::Message), } +// The structure of the new v0 Message #[derive(Serialize, Deserialize)] -pub struct MessageV2 { +pub struct Message { // unchanged pub header: MessageHeader, @@ -135,51 +155,57 @@ pub struct MessageV2 { #[serde(with = "short_vec")] pub account_keys: Vec, - /// The last `address_maps.len()` number of readonly unsigned account_keys - /// should be loaded as address maps - #[serde(with = "short_vec")] - pub address_maps: Vec, - // unchanged pub recent_blockhash: Hash, - // unchanged. Account indices are still `u8` encoded so the max number of accounts - // in account_keys + address_maps is limited to 256. + // unchanged + // + // # Notes + // + // Account and program indexes will index into the list of addresses + // constructed from the concatenation of three key lists: + // 1) message `account_keys` + // 2) ordered list of keys loaded from address table `writable_indexes` + // 3) ordered list of keys loaded from address table `readable_indexes` #[serde(with = "short_vec")] pub instructions: Vec, + + /// List of address table lookups used to load additional accounts + /// for this transaction. + #[serde(with = "short_vec")] + pub address_table_lookups: Vec, } +/// Address table lookups describe an on-chain address lookup table to use +/// for loading more readonly and writable accounts in a single tx. 
#[derive(Serialize, Deserialize)] -pub struct AddressMap { - /// The last num_readonly_entries of entries are read-only - pub num_readonly_entries: u8, - - /// List of map entries to load +pub struct MessageAddressTableLookup { + /// Address lookup table account key + pub account_key: Pubkey, + /// List of indexes used to load writable account addresses #[serde(with = "short_vec")] - pub entries: Vec, + pub writable_indexes: Vec, + /// List of indexes used to load readonly account addresses + #[serde(with = "short_vec")] + pub readonly_indexes: Vec, } ``` #### Size changes -- 1 byte for `prefix` field -- 1 byte for version enum discriminant -- 1 byte for `address_maps` length -- Each map requires 2 bytes for `entries` length and `num_readonly` -- Each map entry is 1 byte (u8) - -#### Cost changes - -Using an address map in a transaction should incur an extra cost due to -the extra work validators need to do to load and cache them. +- 1 extra byte for `version` field +- 1 extra byte for `address_table_lookups` length +- 34 extra bytes for the address and lengths of the `writable_indexes` +and `readonly_indexes` indexes in each address table lookup +- 1 extra byte for each lookup table index #### Metadata changes -Each account accessed via an address map should be stored in the transaction -metadata for quick reference. This will avoid the need for clients to make -multiple RPC round trips to fetch all accounts referenced in a v2 transaction. -It will also make it easier to use the ledger tool to analyze account access -patterns. +Each resolved address from an address lookup table should be stored in +the transaction metadata for quick reference. This will avoid the need for +clients to make multiple RPC round trips to fetch all accounts loaded by a +v2 transaction. It will also make it easier to use the ledger tool to +analyze account access patterns. 
#### RPC changes @@ -190,63 +216,75 @@ attempting to fetch a versioned transaction which will indicate that they must upgrade. The RPC API should also support an option for returning fully expanded -transactions to abstract away the address map details from downstream clients. +transactions to abstract away the address lookup table details from +downstream clients. ### Limitations -- Max of 256 accounts may be specified in a transaction because u8 is used by compiled -instructions to index into transaction message account keys. -- Address maps can hold up to 256 addresses because references to map entries -are encoded as `u8` in transactions. -- Transaction signers may not be referenced with an address map, the full +- Max of 256 unique accounts may be loaded by a transaction because `u8` +is used by compiled instructions to index into transaction message `account_keys`. +- Address lookup tables can hold up to 256 entries because lookup table indexes are also `u8`. +- Transaction signers may not be loaded through an address lookup table, the full address of each signer must be serialized in the transaction. This ensures that the performance of transaction signature checks is not affected. - Hardware wallets will probably not be able to display details about accounts -referenced through address maps due to inability to verify on-chain data. -- Only single level address maps can be used. Recursive maps will not be supported. +referenced through address lookup tables due to inability to verify on-chain data. +- Only single level address lookup tables can be used. Recursive lookups will not be supported. ## Security Concerns +### Lookup table re-initialization + +If an address lookup table can be closed and re-initialized with new addresses, +any client which is unaware of the change could inadvertently lookup unexpected +addresses. 
To avoid this, all address lookup tables must be initialized at an +address derived from a recent slot and they cannot be closed until the slot +used for deactivation is no longer in the slot hashes sysvar. + ### Resource consumption Enabling more account inputs in a transaction allows for more program -invocations, write-locks, and data reads / writes. Before address maps are +invocations, write-locks, and data reads / writes. Before address tables are enabled, transaction-wide compute limits and increased costs for write locks and data reads are required. ### Front running -If the addresses listed within an address map account are modifiable, front -running attacks could modify which mapped accounts are resolved for a later -transaction. For this reason, we propose that any stored address is immutable -and that address map accounts themselves may not be recreated. +If the addresses listed within an address lookup table are mutable, front +running attacks could modify which addresses are resolved for a later +transaction. For this reason, address lookup tables are append-only and may +only be closed if it's no longer possible to create a new lookup table at the +same derived address. Additionally, a malicious actor could try to fork the chain immediately after a -new address map account is added to a block. If successful, they could add a -different unexpected map entry in the fork. In order to deter this attack, -clients should wait for address maps to be finalized before using them in a -transaction. Clients may also append integrity check instructions to the -transaction which verify that the correct accounts are used. +new address lookup table account is added to a block. If successful, they could +add a different unexpected table entry in the fork. In order to deter this attack, +clients should wait for address lookup tables to be finalized before using them in a +transaction. 
Clients may also append integrity check instructions to the +transaction which verify that the correct accounts are looked up. ### Denial of service -Address map accounts will be read very frequently and will therefore be a -more high profile target for denial of service attacks through write locks +Address lookup table accounts may be read very frequently and will therefore +be a more high profile target for denial of service attacks through write locks similar to sysvar accounts. -For this reason, special handling should be given to address map lookups. -Address maps lookups should not be affected by account read/write locks. +For this reason, special handling should be given to address lookup tables. +When an address lookup table is used to lookup addresses for a transaction, +it can be loaded without waiting for a read lock. To avoid race conditions, +only the addresses appended in previous blocks can be used for lookups and +deactivation requires a cool-down period. ### Duplicate accounts Transactions may not load an account more than once whether directly through -`account_keys` or indirectly through `address_maps`. +`account_keys` or indirectly through `address_table_lookups`. ## Other Proposals 1) Account prefixes -Needing to pre-register accounts in an on-chain address map is cumbersome +Needing to pre-register accounts in an on-chain address lookup table is cumbersome because it adds an extra step for transaction processing. Instead, Solana transactions could use variable length address prefixes to specify accounts. These prefix shortcuts can save on data usage without needing to setup on-chain @@ -300,3 +338,11 @@ transaction to aid the sanitization of account indexes. We would also need to encode how many addresses in the list should be loaded as readonly vs read-write. Lastly, special attention must be given to watch out for addresses that exist in multiple account lists. 
+ +5) Increase transaction size + +Significantly larger serialized transactions have an increased likelihood of being +dropped over the wire but this might not be a big issue since clients can retry +transactions anyways. The only time validators need to send individual transactions +over the network is when a leader forwards unprocessed transactions to the next +leader. diff --git a/docs/src/terminology.md b/docs/src/terminology.md index 2e36a7b18413f5..0173f07c1c04e0 100644 --- a/docs/src/terminology.md +++ b/docs/src/terminology.md @@ -108,7 +108,7 @@ The time, i.e. number of [slots](#slot), for which a [leader schedule](#leader-s ## fee account -The fee account in the transaction is the account pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance. +The fee account in the transaction is the account that pays for the cost of including the transaction in the ledger. This is the first account in the transaction. This account must be declared as Read-Write (writable) in the transaction since paying for the transaction reduces the account balance. ## finality diff --git a/docs/src/wallet-guide/apps.md b/docs/src/wallet-guide/apps.md index 54788d05460a92..90dc8f15154f76 100644 --- a/docs/src/wallet-guide/apps.md +++ b/docs/src/wallet-guide/apps.md @@ -5,6 +5,13 @@ title: Mobile App Wallets Solana is supported by multiple third-party apps which should provide a familiar experience for most people who are new or experienced with using crypto wallets. +## Coin98 + +[Coin98](https://coin98.app/) is an app available for iOS and Android and can +be used to send and receive SOL tokens. + +_Note: Coin98 does not support stake accounts or staking operations_ + ## Exodus Send, receive & exchange cryptocurrency with ease on the world's leading Desktop, Mobile and Hardware crypto wallets. 
@@ -13,6 +20,16 @@ Download [Exodus](https://exodus.com/) to easily and securely manage your Solana Exodus includes live charts, a built-in exchange, and 24/7 human support. +## Solflare + +[Solflare Wallet](https://solflare.com/) has mobile applications available for both +iOS and Android. These mobile apps have support for sending Solana and SPL tokens, +staking, and NFT management in a fully-featured NFT gallery. + +Security is a top priority for Solflare - the mobile wallet is non-custodial, +meaning keys are managed by the user who retains total control of their own funds. +The app supports biometric protection alongside passwords for maximum security. + ## Trust Wallet [Trust Wallet](https://trustwallet.com/) is an app available for iOS and Android @@ -40,13 +57,6 @@ viewed at any later time in the app by following these steps: - Go to Setting -> Wallets - Under the Options menu for a particular wallet tap "Show Recovery Phrase" -## Coin98 - -[Coin98](https://coin98.app/) is an app available for iOS and Android and can -be used to send and receive SOL tokens. - -_Note: Coin98 does not support stake accounts or staking operations_ - ## Zelcore [Zelcore](https://zelcore.io) is a multi-currency wallet now supporting SOL and all Solana tokens (SPL). Each Zelcore account has 3 separate addresses for each asset. diff --git a/docs/src/wallet-guide/web-wallets.md b/docs/src/wallet-guide/web-wallets.md index 8aac1c14623b84..916a54871a745b 100644 --- a/docs/src/wallet-guide/web-wallets.md +++ b/docs/src/wallet-guide/web-wallets.md @@ -2,6 +2,19 @@ title: Web Wallets --- +## BitKeep + +[BitKeep](https://bitkeep.com) is a digital currency wallet and can send and receive SOL/SPL tokens. +BitKeep also supports Solana DApps with BitKeep Browser and BitKeep Chrome. + +## MathWallet + +[MathWallet](https://mathwallet.org/) supports wallet +addresses for sending and receiving SOL and SPL Tokens through its +browser extension and web wallet interface. 
+ +_Note: The MathWallet iOS and Android app do not yet support SOL and SPL Tokens_ + ## Phantom [Phantom](https://phantom.app/) is a friendly non-custodial, browser @@ -14,12 +27,21 @@ receive, collect, and swap tokens. Available for Chrome, Brave, Firefox, Vivaldi, and Edge -## SolFlare +## Solflare + +[Solflare](https://solflare.com/) is a non-custodial web wallet created by the +[Solrise Finance](https://solrise.finance) team that was built specifically for Solana. +Solflare is accessible and easy to use but also has a very comprehensive set of features, including: -[SolFlare.com](https://solflare.com/) is a community-created non-custodial -web wallet that was built specifically for Solana. SolFlare supports the creation -and management of stake accounts, and gives users the ability to send and receive -any SPL Token. +- The ability to connect your wallet to almost any Solana Dapp +- Transaction simulations, which show the balance changes expected from a transaction and protect against malicious dapps +- Deep staking support with the ability to create and manage all your staking accounts +- Comprehensive NFT functionality, including the ability to send, receive and preview NFTs from a Metaplex-compatible NFT gallery. Support is provided for image, video, audio, and 3d/VR NFTs. +- An in-wallet swap for SPL tokens +- Compatibility with the Ledger hardware wallet + +Solflare is available on web, as a browser extension, and as a mobile app for both Android and iOS. +The extension is available on Chrome, Brave, Firefox, Opera, and Edge. Check out our [guide for using SolFlare](solflare.md). @@ -28,15 +50,3 @@ Check out our [guide for using SolFlare](solflare.md). [sollet.io](https://www.sollet.io/) is a non-custodial web wallet created by the [Project Serum](https://projectserum.com/) team. sollet.io can be used to send and receive SOL and any SPL Token. 
- -## MathWallet - -[MathWallet](https://mathwallet.org/) supports wallet -addresses for sending and receiving SOL and SPL Tokens through its -browser extension and web wallet interface. - -_Note: The MathWallet iOS and Android app do not yet support SOL and SPL Tokens_ - -## BitKeep -[BitKeep](https://bitkeep.com) is an digital currency wallet and can send and receive SOL/SPL tokens. -BitKeep also support Solana DApps with BitKeep Browser and BitKeep Chrome. diff --git a/dos/Cargo.toml b/dos/Cargo.toml index 2c2ce69210441c..34e1560927e4a7 100644 --- a/dos/Cargo.toml +++ b/dos/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-dos" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,15 +13,15 @@ bincode = "1.3.3" clap = "2.33.1" log = "0.4.14" rand = "0.7.0" -solana-core = { path = "../core", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } 
[package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/download-utils/Cargo.toml b/download-utils/Cargo.toml index 2da187aef10ef0..9b430679078d3a 100644 --- a/download-utils/Cargo.toml +++ b/download-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-download-utils" -version = "1.9.0" +version = "1.9.4" description = "Solana Download Utils" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,8 +14,8 @@ console = "0.15.0" indicatif = "0.16.2" log = "0.4.14" reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/entry/Cargo.toml b/entry/Cargo.toml index bf307e757def72..e9348b8eb4f114 100644 --- a/entry/Cargo.toml +++ b/entry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-entry" -version = "1.9.0" +version = "1.9.4" description = "Solana Entry" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,17 +16,17 @@ log = "0.4.11" rand = "0.7.0" rayon = "1.5.1" serde = "1.0.130" -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-merkle-tree = { path = "../merkle-tree", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-rayon-threadlimit = { path = 
"../rayon-threadlimit", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } bincode = "1.3.3" [dev-dependencies] matches = "0.1.9" -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/entry/src/entry.rs b/entry/src/entry.rs index 59a607838caa88..f02354225da555 100644 --- a/entry/src/entry.rs +++ b/entry/src/entry.rs @@ -15,7 +15,7 @@ use { solana_metrics::*, solana_perf::{ cuda_runtime::PinnedVec, - packet::{Packet, Packets, PacketsRecycler, PACKETS_PER_BATCH}, + packet::{Packet, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH}, perf_libs, recycler::Recycler, sigverify, @@ -308,7 +308,7 @@ impl<'a> EntrySigVerificationState { pub struct VerifyRecyclers { hash_recycler: Recycler>, tick_count_recycler: Recycler>, - packet_recycler: PacketsRecycler, + packet_recycler: PacketBatchRecycler, out_recycler: Recycler>, tx_offset_recycler: Recycler, } @@ -499,12 +499,12 @@ pub fn start_verify_transactions( }) .flatten() .collect::>(); - let mut packets_vec = entry_txs + let mut packet_batches = entry_txs .par_iter() .chunks(PACKETS_PER_BATCH) .map(|slice| { let vec_size = slice.len(); - let mut packets = Packets::new_with_recycler( + let mut packet_batch = PacketBatch::new_with_recycler( verify_recyclers.packet_recycler.clone(), vec_size, "entry-sig-verify", @@ -515,13 +515,13 @@ pub fn start_verify_transactions( // uninitialized anyway, so the initilization would simply write junk into // the vector anyway. 
unsafe { - packets.packets.set_len(vec_size); + packet_batch.packets.set_len(vec_size); } let entry_tx_iter = slice .into_par_iter() .map(|tx| tx.to_versioned_transaction()); - let res = packets + let res = packet_batch .packets .par_iter_mut() .zip(entry_tx_iter) @@ -530,7 +530,7 @@ pub fn start_verify_transactions( Packet::populate_packet(pair.0, None, &pair.1).is_ok() }); if res { - Ok(packets) + Ok(packet_batch) } else { Err(TransactionError::SanitizeFailure) } @@ -542,14 +542,14 @@ pub fn start_verify_transactions( let gpu_verify_thread = thread::spawn(move || { let mut verify_time = Measure::start("sigverify"); sigverify::ed25519_verify( - &mut packets_vec, + &mut packet_batches, &tx_offset_recycler, &out_recycler, false, ); - let verified = packets_vec + let verified = packet_batches .iter() - .all(|packets| packets.packets.iter().all(|p| !p.meta.discard)); + .all(|batch| batch.packets.iter().all(|p| !p.meta.discard())); verify_time.stop(); (verified, verify_time.as_us()) }); diff --git a/faucet/Cargo.toml b/faucet/Cargo.toml index 0326c3ab08db49..08661bf7e2e07d 100644 --- a/faucet/Cargo.toml +++ b/faucet/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-faucet" -version = "1.9.0" +version = "1.9.4" description = "Solana Faucet" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,12 +16,12 @@ clap = "2.33" log = "0.4.14" serde = "1.0.130" serde_derive = "1.0.103" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-logger = { path = "../logger", version = 
"=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } spl-memo = { version = "=3.0.1", features = ["no-entrypoint"] } thiserror = "1.0" tokio = { version = "1", features = ["full"] } diff --git a/frozen-abi/Cargo.toml b/frozen-abi/Cargo.toml index cedbcf71beea50..32a2d18b47ee5f 100644 --- a/frozen-abi/Cargo.toml +++ b/frozen-abi/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-frozen-abi" -version = "1.9.0" +version = "1.9.4" description = "Solana Frozen ABI" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,11 +16,11 @@ log = "0.4.14" serde = "1.0.130" serde_derive = "1.0.103" sha2 = "0.9.8" -solana-frozen-abi-macro = { path = "macro", version = "=1.9.0" } +solana-frozen-abi-macro = { path = "macro", version = "=1.9.4" } thiserror = "1.0" [target.'cfg(not(target_arch = "bpf"))'.dependencies] -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } generic-array = { version = "0.14.4", default-features = false, features = ["serde", "more_lengths"]} memmap2 = "0.5.0" diff --git a/frozen-abi/macro/Cargo.toml b/frozen-abi/macro/Cargo.toml index 4bef26c8698928..ec83c800b2ec3c 100644 --- a/frozen-abi/macro/Cargo.toml +++ b/frozen-abi/macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-frozen-abi-macro" -version = "1.9.0" +version = "1.9.4" description = "Solana Frozen ABI Macro" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/genesis-utils/Cargo.toml b/genesis-utils/Cargo.toml index d432a73b86f663..ddd10415cf71ae 100644 --- a/genesis-utils/Cargo.toml +++ b/genesis-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-genesis-utils" -version = "1.9.0" +version = "1.9.4" description = "Solana Genesis Utils" authors = ["Solana Maintainers "] repository = 
"https://github.com/solana-labs/solana" @@ -10,9 +10,9 @@ documentation = "https://docs.rs/solana-download-utils" edition = "2021" [dependencies] -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-download-utils = { path = "../download-utils", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-download-utils = { path = "../download-utils", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/genesis/Cargo.toml b/genesis/Cargo.toml index f2e7c369050558..1b943e771e7eab 100644 --- a/genesis/Cargo.toml +++ b/genesis/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-genesis" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -15,16 +15,16 @@ clap = "2.33.1" serde = "1.0.130" serde_json = "1.0.72" serde_yaml = "0.8.21" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { 
path = "../logger", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } tempfile = "3.2.0" [[bin]] diff --git a/gossip/Cargo.toml b/gossip/Cargo.toml index dac61b7dd72bc8..b4def2358018bb 100644 --- a/gossip/Cargo.toml +++ b/gossip/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-gossip" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -17,7 +17,7 @@ flate2 = "1.0" indexmap = { version = "1.7", features = ["rayon"] } itertools = "0.10.1" log = "0.4.14" -lru = "0.7.0" +lru = "0.7.1" matches = "0.1.9" num-traits = "0.2" rand = "0.7.0" @@ -26,23 +26,23 @@ rayon = "1.5.1" serde = "1.0.130" serde_bytes = "0.11" serde_derive = "1.0.103" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-streamer = { path = 
"../streamer", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } thiserror = "1.0" [dev-dependencies] diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs index edf51db192ade3..d5d3acea11750e 100644 --- a/gossip/src/cluster_info.rs +++ b/gossip/src/cluster_info.rs @@ -47,8 +47,8 @@ use { solana_perf::{ data_budget::DataBudget, packet::{ - limited_deserialize, to_packets_with_destination, Packet, Packets, PacketsRecycler, - PACKET_DATA_SIZE, + limited_deserialize, to_packet_batch_with_destination, Packet, PacketBatch, + PacketBatchRecycler, PACKET_DATA_SIZE, }, }, solana_rayon_threadlimit::get_thread_count, @@ -67,7 +67,7 @@ use { packet, sendmmsg::{multi_target_send, SendPktsError}, socket::SocketAddrSpace, - 
streamer::{PacketReceiver, PacketSender}, + streamer::{PacketBatchReceiver, PacketBatchSender}, }, solana_vote_program::{ vote_state::MAX_LOCKOUT_HISTORY, vote_transaction::parse_vote_transaction, @@ -94,7 +94,7 @@ use { }; pub const VALIDATOR_PORT_RANGE: PortRange = (8000, 10_000); -pub const MINIMUM_VALIDATOR_PORT_RANGE_WIDTH: u16 = 10; // VALIDATOR_PORT_RANGE must be at least this wide +pub const MINIMUM_VALIDATOR_PORT_RANGE_WIDTH: u16 = 11; // VALIDATOR_PORT_RANGE must be at least this wide /// The Data plane fanout size, also used as the neighborhood size pub const DATA_PLANE_FANOUT: usize = 200; @@ -983,7 +983,7 @@ impl ClusterInfo { assert!((vote_index as usize) < MAX_LOCKOUT_HISTORY); let self_pubkey = self.id(); let now = timestamp(); - let vote = Vote::new(self_pubkey, vote, now); + let vote = Vote::new(self_pubkey, vote, now).unwrap(); let vote = CrdsData::Vote(vote_index, vote); let vote = CrdsValue::new_signed(vote, &self.keypair()); let mut gossip_crds = self.gossip.crds.write().unwrap(); @@ -1588,9 +1588,9 @@ impl ClusterInfo { &self, thread_pool: &ThreadPool, gossip_validators: Option<&HashSet>, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, stakes: &HashMap, - sender: &PacketSender, + sender: &PacketBatchSender, generate_pull_requests: bool, ) -> Result<(), GossipError> { let reqs = self.generate_new_gossip_requests( @@ -1600,11 +1600,11 @@ impl ClusterInfo { generate_pull_requests, ); if !reqs.is_empty() { - let packets = to_packets_with_destination(recycler.clone(), &reqs); + let packet_batch = to_packet_batch_with_destination(recycler.clone(), &reqs); self.stats .packets_sent_gossip_requests_count - .add_relaxed(packets.packets.len() as u64); - sender.send(packets)?; + .add_relaxed(packet_batch.packets.len() as u64); + sender.send(packet_batch)?; } Ok(()) } @@ -1699,7 +1699,7 @@ impl ClusterInfo { pub fn gossip( self: Arc, bank_forks: Option>>, - sender: PacketSender, + sender: PacketBatchSender, gossip_validators: Option>, 
exit: Arc, ) -> JoinHandle<()> { @@ -1715,7 +1715,7 @@ impl ClusterInfo { let mut last_contact_info_trace = timestamp(); let mut last_contact_info_save = timestamp(); let mut entrypoints_processed = false; - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let crds_data = vec![ CrdsData::Version(Version::new(self.id())), CrdsData::NodeInstance( @@ -1840,9 +1840,9 @@ impl ClusterInfo { // from address, crds filter, caller contact info requests: Vec<(SocketAddr, CrdsFilter, CrdsValue)>, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, stakes: &HashMap, - response_sender: &PacketSender, + response_sender: &PacketBatchSender, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_pull_requests_time); if requests.is_empty() { @@ -1904,7 +1904,7 @@ impl ClusterInfo { &'a self, now: Instant, mut rng: &'a mut R, - packets: &'a mut Packets, + packet_batch: &'a mut PacketBatch, ) -> impl FnMut(&PullData) -> bool + 'a where R: Rng + CryptoRng, @@ -1917,7 +1917,7 @@ impl ClusterInfo { if let Some(ping) = ping { let ping = Protocol::PingMessage(ping); match Packet::from_data(Some(&node.1), ping) { - Ok(packet) => packets.packets.push(packet), + Ok(packet) => packet_batch.packets.push(packet), Err(err) => error!("failed to write ping packet: {:?}", err), }; } @@ -1944,10 +1944,10 @@ impl ClusterInfo { fn handle_pull_requests( &self, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, requests: Vec, stakes: &HashMap, - ) -> Packets { + ) -> PacketBatch { const DEFAULT_EPOCH_DURATION_MS: u64 = DEFAULT_SLOTS_PER_EPOCH * DEFAULT_MS_PER_SLOT; let mut time = Measure::start("handle_pull_requests"); let callers = crds_value::filter_current(requests.iter().map(|r| &r.caller)); @@ -1958,12 +1958,12 @@ impl ClusterInfo { } let output_size_limit = self.update_data_budget(stakes.len()) / PULL_RESPONSE_MIN_SERIALIZED_SIZE; - let mut packets = - 
Packets::new_unpinned_with_recycler(recycler.clone(), 64, "handle_pull_requests"); + let mut packet_batch = + PacketBatch::new_unpinned_with_recycler(recycler.clone(), 64, "handle_pull_requests"); let (caller_and_filters, addrs): (Vec<_>, Vec<_>) = { let mut rng = rand::thread_rng(); let check_pull_request = - self.check_pull_request(Instant::now(), &mut rng, &mut packets); + self.check_pull_request(Instant::now(), &mut rng, &mut packet_batch); requests .into_iter() .filter(check_pull_request) @@ -2009,7 +2009,7 @@ impl ClusterInfo { }) .unzip(); if responses.is_empty() { - return packets; + return packet_batch; } let mut rng = rand::thread_rng(); let shuffle = WeightedShuffle::new(&mut rng, &scores).unwrap(); @@ -2023,7 +2023,7 @@ impl ClusterInfo { Ok(packet) => { if self.outbound_budget.take(packet.meta.size) { total_bytes += packet.meta.size; - packets.packets.push(packet); + packet_batch.packets.push(packet); sent += 1; } else { inc_new_counter_info!("gossip_pull_request-no_budget", 1); @@ -2043,7 +2043,7 @@ impl ClusterInfo { responses.len(), total_bytes ); - packets + packet_batch } fn handle_batch_pull_responses( @@ -2164,8 +2164,8 @@ impl ClusterInfo { fn handle_batch_ping_messages( &self, pings: I, - recycler: &PacketsRecycler, - response_sender: &PacketSender, + recycler: &PacketBatchRecycler, + response_sender: &PacketBatchSender, ) where I: IntoIterator, { @@ -2175,7 +2175,11 @@ impl ClusterInfo { } } - fn handle_ping_messages(&self, pings: I, recycler: &PacketsRecycler) -> Option + fn handle_ping_messages( + &self, + pings: I, + recycler: &PacketBatchRecycler, + ) -> Option where I: IntoIterator, { @@ -2197,9 +2201,12 @@ impl ClusterInfo { if packets.is_empty() { None } else { - let packets = - Packets::new_unpinned_with_recycler_data(recycler, "handle_ping_messages", packets); - Some(packets) + let packet_batch = PacketBatch::new_unpinned_with_recycler_data( + recycler, + "handle_ping_messages", + packets, + ); + Some(packet_batch) } } @@ -2222,9 
+2229,9 @@ impl ClusterInfo { &self, messages: Vec<(Pubkey, Vec)>, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, stakes: &HashMap, - response_sender: &PacketSender, + response_sender: &PacketBatchSender, ) { let _st = ScopedTimer::from(&self.stats.handle_batch_push_messages_time); if messages.is_empty() { @@ -2301,17 +2308,17 @@ impl ClusterInfo { if prune_messages.is_empty() { return; } - let mut packets = to_packets_with_destination(recycler.clone(), &prune_messages); - let num_prune_packets = packets.packets.len(); + let mut packet_batch = to_packet_batch_with_destination(recycler.clone(), &prune_messages); + let num_prune_packets = packet_batch.packets.len(); self.stats .push_response_count - .add_relaxed(packets.packets.len() as u64); + .add_relaxed(packet_batch.packets.len() as u64); let new_push_requests = self.new_push_requests(stakes); inc_new_counter_debug!("cluster_info-push_message-pushes", new_push_requests.len()); for (address, request) in new_push_requests { if ContactInfo::is_valid_address(&address, &self.socket_addr_space) { match Packet::from_data(Some(&address), &request) { - Ok(packet) => packets.packets.push(packet), + Ok(packet) => packet_batch.packets.push(packet), Err(err) => error!("failed to write push-request packet: {:?}", err), } } else { @@ -2323,8 +2330,8 @@ impl ClusterInfo { .add_relaxed(num_prune_packets as u64); self.stats .packets_sent_push_messages_count - .add_relaxed((packets.packets.len() - num_prune_packets) as u64); - let _ = response_sender.send(packets); + .add_relaxed((packet_batch.packets.len() - num_prune_packets) as u64); + let _ = response_sender.send(packet_batch); } fn require_stake_for_gossip(&self, stakes: &HashMap) -> bool { @@ -2342,8 +2349,8 @@ impl ClusterInfo { &self, packets: VecDeque<(/*from:*/ SocketAddr, Protocol)>, thread_pool: &ThreadPool, - recycler: &PacketsRecycler, - response_sender: &PacketSender, + recycler: &PacketBatchRecycler, + response_sender: 
&PacketBatchSender, stakes: &HashMap, _feature_set: Option<&FeatureSet>, epoch_duration: Duration, @@ -2460,15 +2467,15 @@ impl ClusterInfo { // handling of requests/messages. fn run_socket_consume( &self, - receiver: &PacketReceiver, + receiver: &PacketBatchReceiver, sender: &Sender>, thread_pool: &ThreadPool, ) -> Result<(), GossipError> { const RECV_TIMEOUT: Duration = Duration::from_secs(1); let packets: Vec<_> = receiver.recv_timeout(RECV_TIMEOUT)?.packets.into(); let mut packets = VecDeque::from(packets); - for payload in receiver.try_iter() { - packets.extend(payload.packets.iter().cloned()); + for packet_batch in receiver.try_iter() { + packets.extend(packet_batch.packets.iter().cloned()); let excess_count = packets.len().saturating_sub(MAX_GOSSIP_TRAFFIC); if excess_count > 0 { packets.drain(0..excess_count); @@ -2500,10 +2507,10 @@ impl ClusterInfo { /// Process messages from the network fn run_listen( &self, - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, bank_forks: Option<&RwLock>, receiver: &Receiver>, - response_sender: &PacketSender, + response_sender: &PacketBatchSender, thread_pool: &ThreadPool, last_print: &mut Instant, should_check_duplicate_instance: bool, @@ -2551,7 +2558,7 @@ impl ClusterInfo { pub(crate) fn start_socket_consume_thread( self: Arc, - receiver: PacketReceiver, + receiver: PacketBatchReceiver, sender: Sender>, exit: Arc, ) -> JoinHandle<()> { @@ -2581,12 +2588,12 @@ impl ClusterInfo { self: Arc, bank_forks: Option>>, requests_receiver: Receiver>, - response_sender: PacketSender, + response_sender: PacketBatchSender, should_check_duplicate_instance: bool, exit: Arc, ) -> JoinHandle<()> { let mut last_print = Instant::now(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let thread_pool = ThreadPoolBuilder::new() .num_threads(get_thread_count().min(8)) .thread_name(|i| format!("sol-gossip-work-{}", i)) @@ -2735,6 +2742,7 @@ pub struct Sockets { pub repair: UdpSocket, 
pub retransmit_sockets: Vec, pub serve_repair: UdpSocket, + pub ancestor_hashes_requests: UdpSocket, } #[derive(Debug)] @@ -2768,6 +2776,8 @@ impl Node { let broadcast = vec![UdpSocket::bind("0.0.0.0:0").unwrap()]; let retransmit_socket = UdpSocket::bind("0.0.0.0:0").unwrap(); let serve_repair = UdpSocket::bind("127.0.0.1:0").unwrap(); + let ancestor_hashes_requests = UdpSocket::bind("0.0.0.0:0").unwrap(); + let info = ContactInfo { id: *pubkey, gossip: gossip_addr, @@ -2797,6 +2807,7 @@ impl Node { repair, retransmit_sockets: vec![retransmit_socket], serve_repair, + ancestor_hashes_requests, }, } } @@ -2838,6 +2849,7 @@ impl Node { let (repair_port, repair) = Self::bind(bind_ip_addr, port_range); let (serve_repair_port, serve_repair) = Self::bind(bind_ip_addr, port_range); let (_, broadcast) = Self::bind(bind_ip_addr, port_range); + let (_, ancestor_hashes_requests) = Self::bind(bind_ip_addr, port_range); let rpc_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); let rpc_pubsub_port = find_available_port_in_range(bind_ip_addr, port_range).unwrap(); @@ -2873,6 +2885,7 @@ impl Node { repair, retransmit_sockets: vec![retransmit_socket], serve_repair, + ancestor_hashes_requests, }, } } @@ -2910,6 +2923,8 @@ impl Node { let (_, broadcast) = multi_bind_in_range(bind_ip_addr, port_range, 4).expect("broadcast multi_bind"); + let (_, ancestor_hashes_requests) = Self::bind(bind_ip_addr, port_range); + let info = ContactInfo { id: *pubkey, gossip: SocketAddr::new(gossip_addr.ip(), gossip_port), @@ -2941,6 +2956,7 @@ impl Node { retransmit_sockets, serve_repair, ip_echo: Some(ip_echo), + ancestor_hashes_requests, }, } } @@ -2955,9 +2971,9 @@ pub fn push_messages_to_peer( let reqs: Vec<_> = ClusterInfo::split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, messages) .map(move |payload| (peer_gossip, Protocol::PushMessage(self_id, payload))) .collect(); - let packets = to_packets_with_destination(PacketsRecycler::default(), &reqs); + let packet_batch = 
to_packet_batch_with_destination(PacketBatchRecycler::default(), &reqs); let sock = UdpSocket::bind("0.0.0.0:0").unwrap(); - packet::send_to(&packets, &sock, socket_addr_space)?; + packet::send_to(&packet_batch, &sock, socket_addr_space)?; Ok(()) } @@ -3206,7 +3222,7 @@ mod tests { .iter() .map(|ping| Pong::new(ping, &this_node).unwrap()) .collect(); - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); let packets = cluster_info .handle_ping_messages( remote_nodes @@ -3483,7 +3499,10 @@ mod tests { fn new_with_external_ip_test_gossip() { // Can't use VALIDATOR_PORT_RANGE because if this test runs in parallel with others, the // port returned by `bind_in_range()` might be snatched up before `Node::new_with_external_ip()` runs - let port_range = (VALIDATOR_PORT_RANGE.1 + 10, VALIDATOR_PORT_RANGE.1 + 20); + let port_range = ( + VALIDATOR_PORT_RANGE.1 + MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, + VALIDATOR_PORT_RANGE.1 + (2 * MINIMUM_VALIDATOR_PORT_RANGE_WIDTH), + ); let ip = IpAddr::V4(Ipv4Addr::from(0)); let port = bind_in_range(ip, port_range).expect("Failed to bind").0; @@ -4200,7 +4219,8 @@ mod tests { keypair.pubkey(), vote_tx, 0, // wallclock - ); + ) + .unwrap(); let vote = CrdsValue::new_signed(CrdsData::Vote(1, vote), &Keypair::new()); assert!(bincode::serialized_size(&vote).unwrap() <= PUSH_MESSAGE_MAX_PAYLOAD_SIZE as u64); } diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs index c3860a4ccf8cd8..5ce1559f17efd5 100644 --- a/gossip/src/crds_gossip_pull.rs +++ b/gossip/src/crds_gossip_pull.rs @@ -675,7 +675,7 @@ pub(crate) mod tests { rand::{seq::SliceRandom, thread_rng, SeedableRng}, rand_chacha::ChaChaRng, rayon::ThreadPoolBuilder, - solana_perf::test_tx::test_tx, + solana_perf::test_tx::new_test_vote_tx, solana_sdk::{ hash::{hash, HASH_BYTES}, packet::PACKET_DATA_SIZE, @@ -1623,6 +1623,7 @@ pub(crate) mod tests { #[test] fn test_process_pull_response() { + let mut rng = rand::thread_rng(); let 
node_crds = RwLock::::default(); let node = CrdsGossipPull::default(); @@ -1678,8 +1679,8 @@ pub(crate) mod tests { ); // construct something that's not a contact info - let peer_vote = - CrdsValue::new_unsigned(CrdsData::Vote(0, Vote::new(peer_pubkey, test_tx(), 0))); + let peer_vote = Vote::new(peer_pubkey, new_test_vote_tx(&mut rng), 0).unwrap(); + let peer_vote = CrdsValue::new_unsigned(CrdsData::Vote(0, peer_vote)); // check that older CrdsValues (non-ContactInfos) infos pass even if are too old, // but a recent contact info (inserted above) exists assert_eq!( diff --git a/gossip/src/crds_value.rs b/gossip/src/crds_value.rs index 310bdf268f9b98..305571623f49f9 100644 --- a/gossip/src/crds_value.rs +++ b/gossip/src/crds_value.rs @@ -305,15 +305,14 @@ impl Sanitize for Vote { } impl Vote { - pub fn new(from: Pubkey, transaction: Transaction, wallclock: u64) -> Self { - let slot = parse_vote_transaction(&transaction) - .and_then(|(_, vote, _)| vote.slots.last().copied()); - Self { + // Returns None if cannot parse transaction into a vote. + pub fn new(from: Pubkey, transaction: Transaction, wallclock: u64) -> Option { + parse_vote_transaction(&transaction).map(|(_, vote, _)| Self { from, transaction, wallclock, - slot, - } + slot: vote.slots.last().copied(), + }) } /// New random Vote for tests and benchmarks. 
@@ -347,16 +346,11 @@ impl<'de> Deserialize<'de> for Vote { wallclock: u64, } let vote = Vote::deserialize(deserializer)?; - let vote = match vote.transaction.sanitize() { - Ok(_) => Self::new(vote.from, vote.transaction, vote.wallclock), - Err(_) => Self { - from: vote.from, - transaction: vote.transaction, - wallclock: vote.wallclock, - slot: None, - }, - }; - Ok(vote) + vote.transaction + .sanitize() + .map_err(serde::de::Error::custom)?; + Self::new(vote.from, vote.transaction, vote.wallclock) + .ok_or_else(|| serde::de::Error::custom("invalid vote tx")) } } @@ -692,7 +686,7 @@ mod test { bincode::{deserialize, Options}, rand::SeedableRng, rand_chacha::ChaChaRng, - solana_perf::test_tx::test_tx, + solana_perf::test_tx::new_test_vote_tx, solana_sdk::{ signature::{Keypair, Signer}, timing::timestamp, @@ -703,15 +697,14 @@ mod test { #[test] fn test_keys_and_values() { + let mut rng = rand::thread_rng(); let v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::default())); assert_eq!(v.wallclock(), 0); let key = v.contact_info().unwrap().id; assert_eq!(v.label(), CrdsValueLabel::ContactInfo(key)); - let v = CrdsValue::new_unsigned(CrdsData::Vote( - 0, - Vote::new(Pubkey::default(), test_tx(), 0), - )); + let v = Vote::new(Pubkey::default(), new_test_vote_tx(&mut rng), 0).unwrap(); + let v = CrdsValue::new_unsigned(CrdsData::Vote(0, v)); assert_eq!(v.wallclock(), 0); let key = match &v.data { CrdsData::Vote(_, vote) => vote.from, @@ -759,6 +752,7 @@ mod test { #[test] fn test_signature() { + let mut rng = rand::thread_rng(); let keypair = Keypair::new(); let wrong_keypair = Keypair::new(); let mut v = CrdsValue::new_unsigned(CrdsData::ContactInfo(ContactInfo::new_localhost( @@ -766,10 +760,8 @@ mod test { timestamp(), ))); verify_signatures(&mut v, &keypair, &wrong_keypair); - v = CrdsValue::new_unsigned(CrdsData::Vote( - 0, - Vote::new(keypair.pubkey(), test_tx(), timestamp()), - )); + let v = Vote::new(keypair.pubkey(), new_test_vote_tx(&mut rng), 
timestamp()).unwrap(); + let mut v = CrdsValue::new_unsigned(CrdsData::Vote(0, v)); verify_signatures(&mut v, &keypair, &wrong_keypair); v = CrdsValue::new_unsigned(CrdsData::LowestSlot( 0, @@ -780,14 +772,10 @@ mod test { #[test] fn test_max_vote_index() { + let mut rng = rand::thread_rng(); let keypair = Keypair::new(); - let vote = CrdsValue::new_signed( - CrdsData::Vote( - MAX_VOTES, - Vote::new(keypair.pubkey(), test_tx(), timestamp()), - ), - &keypair, - ); + let vote = Vote::new(keypair.pubkey(), new_test_vote_tx(&mut rng), timestamp()).unwrap(); + let vote = CrdsValue::new_signed(CrdsData::Vote(MAX_VOTES, vote), &keypair); assert!(vote.sanitize().is_err()); } @@ -811,7 +799,8 @@ mod test { Pubkey::new_unique(), // from tx, rng.gen(), // wallclock - ); + ) + .unwrap(); assert_eq!(vote.slot, Some(7)); let bytes = bincode::serialize(&vote).unwrap(); let other = bincode::deserialize(&bytes[..]).unwrap(); diff --git a/gossip/src/duplicate_shred.rs b/gossip/src/duplicate_shred.rs index 1c9c0ee9951485..9a8d0437dcd56f 100644 --- a/gossip/src/duplicate_shred.rs +++ b/gossip/src/duplicate_shred.rs @@ -336,11 +336,12 @@ pub(crate) mod tests { }) .take(5) .collect(); - let (mut data_shreds, _coding_shreds, _last_shred_index) = shredder.entries_to_shreds( + let (mut data_shreds, _coding_shreds) = shredder.entries_to_shreds( keypair, &entries, true, // is_last_in_slot next_shred_index, + next_shred_index, // next_code_index ); data_shreds.swap_remove(0) } diff --git a/install/Cargo.toml b/install/Cargo.toml index 5a3ae2f83c6bf9..c80199633b910a 100644 --- a/install/Cargo.toml +++ b/install/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-install" description = "The solana cluster software installer" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -24,12 +24,12 @@ nix = "0.23.0" reqwest = { version = "0.11.6", default-features = 
false, features = ["blocking", "rustls-tls", "json"] } serde = { version = "1.0.130", features = ["derive"] } serde_yaml = "0.8.21" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-config-program = { path = "../programs/config", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-config-program = { path = "../programs/config", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } semver = "1.0.4" tar = "0.4.37" tempfile = "3.2.0" diff --git a/install/solana-install-init.sh b/install/solana-install-init.sh index b3dd543a1afd9d..ceb9f4c1970fec 100644 --- a/install/solana-install-init.sh +++ b/install/solana-install-init.sh @@ -62,17 +62,24 @@ main() { esac done - case "$(uname)" in + _ostype="$(uname -s)" + _cputype="$(uname -m)" + + case "$_ostype" in Linux) - TARGET=x86_64-unknown-linux-gnu + _ostype=unknown-linux-gnu ;; Darwin) - TARGET=x86_64-apple-darwin + if [[ $_cputype = arm64 ]]; then + _cputype=aarch64 + fi + _ostype=apple-darwin ;; *) err "machine architecture is currently unsupported" ;; esac + TARGET="${_cputype}-${_ostype}" temp_dir="$(mktemp -d 2>/dev/null || ensure mktemp -d -t solana-install-init)" ensure mkdir -p "$temp_dir" diff --git a/install/src/command.rs b/install/src/command.rs index 907ca718adafec..ad4b0341f2d313 100644 --- a/install/src/command.rs +++ b/install/src/command.rs @@ -91,7 +91,10 @@ fn download_to_temp( let temp_dir = TempDir::new()?; let temp_file = temp_dir.path().join("download"); - let client = 
reqwest::blocking::Client::new(); + let client = reqwest::blocking::Client::builder() + .connect_timeout(Duration::from_secs(30)) + .timeout(None) + .build()?; let progress_bar = new_spinner_progress_bar(); progress_bar.set_message(format!("{}Downloading...", TRUCK)); diff --git a/keygen/Cargo.toml b/keygen/Cargo.toml index 1ab74f4fd80077..b15069afd89d4e 100644 --- a/keygen/Cargo.toml +++ b/keygen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-keygen" -version = "1.9.0" +version = "1.9.4" description = "Solana key generation utility" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,11 +14,11 @@ bs58 = "0.4.0" clap = "2.33" dirs-next = "2.0.0" num_cpus = "1.13.0" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } tiny-bip39 = "0.8.2" [[bin]] diff --git a/keygen/src/keygen.rs b/keygen/src/keygen.rs index 951026ce731ae8..7f7cf2b40f6bbb 100644 --- a/keygen/src/keygen.rs +++ b/keygen/src/keygen.rs @@ -53,7 +53,7 @@ const WORD_COUNT_ARG: ArgConstant<'static> = ArgConstant { const LANGUAGE_ARG: ArgConstant<'static> = ArgConstant { long: "language", name: "language", - help: "Specify the mnemonic lanaguage that will be present in the generated seed phrase", + help: "Specify the mnemonic language that will be present in the generated seed phrase", }; const NO_PASSPHRASE_ARG: ArgConstant<'static> = ArgConstant { diff --git 
a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml index d8433f83c7d8f4..f4c970be052a1e 100644 --- a/ledger-tool/Cargo.toml +++ b/ledger-tool/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-ledger-tool" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -22,20 +22,20 @@ regex = "1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.72" serde_yaml = "0.8.21" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-output = { path = "../cli-output", version = "=1.9.0" } -solana-core = { path = "../core", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-output = { path = "../cli-output", version = "=1.9.4" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { 
path = "../sdk", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } tokio = { version = "1", features = ["full"] } [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/ledger-tool/src/main.rs b/ledger-tool/src/main.rs index a618d28dc7a9f9..db2952ec5f6071 100644 --- a/ledger-tool/src/main.rs +++ b/ledger-tool/src/main.rs @@ -201,7 +201,7 @@ fn output_slot( println!(" Slot Meta {:?} is_full: {}", meta, is_full); } else { println!( - " num_shreds: {} parent_slot: {} num_entries: {} is_full: {}", + " num_shreds: {}, parent_slot: {:?}, num_entries: {}, is_full: {}", num_shreds, meta.parent_slot, entries.len(), @@ -800,10 +800,7 @@ fn compute_slot_cost(blockstore: &Blockstore, slot: Slot) -> Result<(), String> .for_each(|transaction| { num_programs += transaction.message().instructions().len(); - let tx_cost = cost_model.calculate_cost( - &transaction, - true, // demote_program_write_locks - ); + let tx_cost = cost_model.calculate_cost(&transaction); let result = cost_tracker.try_add(&transaction, &tx_cost); if result.is_err() { println!( diff --git a/ledger/Cargo.toml b/ledger/Cargo.toml index 3d50b2c09f8847..f89904a57efba2 100644 --- a/ledger/Cargo.toml +++ b/ledger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-ledger" -version = "1.9.0" +version = "1.9.4" description = "Solana ledger" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,7 +16,7 @@ chrono = { version = "0.4.11", features = ["serde"] } chrono-humanize = "0.2.1" crossbeam-channel = "0.5" fs_extra = "1.2.0" -futures = "0.3.18" +futures = "0.3.17" itertools = "0.10.1" lazy_static = "1.4.0" libc = "0.2.108" @@ 
-31,34 +31,27 @@ rayon = "1.5.1" serde = "1.0.130" serde_bytes = "0.11.5" sha2 = "0.9.8" -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.9.0" } -solana-storage-proto = { path = "../storage-proto", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-storage-bigtable = { path = 
"../storage-bigtable", version = "=1.9.4" } +solana-storage-proto = { path = "../storage-proto", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } tempfile = "3.2.0" thiserror = "1.0" tokio = { version = "1", features = ["full"] } tokio-stream = "0.1" trees = "0.4.2" - -# Disable reed-solomon-erasure/simd-accel feature on aarch64 only since it -# requires clang to support -march=native. -[target.'cfg(any(target_arch = "aarch64", target_arch = "aarch64_apple_darwin"))'.dependencies] -reed-solomon-erasure = { version = "4.0.2" } - -[target.'cfg(not(any(target_arch = "aarch64", target_arch = "aarch64_apple_darwin")))'.dependencies] -reed-solomon-erasure = { version = "4.0.2", features = ["simd-accel"] } +reed-solomon-erasure = { version = "5.0.1", features = ["simd-accel"] } [dependencies.rocksdb] # Avoid the vendored bzip2 within rocksdb-sys that can cause linker conflicts @@ -70,7 +63,7 @@ features = ["lz4"] [dev-dependencies] assert_matches = "1.5.0" matches = "0.1.9" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } [build-dependencies] rustc_version = "0.4" diff --git a/ledger/benches/sigverify_shreds.rs b/ledger/benches/sigverify_shreds.rs index 4a3de44fffe70e..16bfd7200ef5ec 100644 --- a/ledger/benches/sigverify_shreds.rs +++ b/ledger/benches/sigverify_shreds.rs @@ -7,7 +7,7 @@ use { sigverify_shreds::{sign_shreds_cpu, sign_shreds_gpu, sign_shreds_gpu_pinned_keypair}, }, solana_perf::{ - packet::{Packet, Packets}, + packet::{Packet, PacketBatch}, recycler_cache::RecyclerCache, }, solana_sdk::signature::Keypair, @@ -21,13 +21,13 @@ const NUM_BATCHES: usize = 1; fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) { let recycler_cache = RecyclerCache::default(); - let mut packets = Packets::default(); - packets.packets.set_pinnable(); + let mut packet_batch = PacketBatch::default(); + 
packet_batch.packets.set_pinnable(); let slot = 0xdead_c0de; // need to pin explicitly since the resize will not cause re-allocation - packets.packets.reserve_and_pin(NUM_PACKETS); - packets.packets.resize(NUM_PACKETS, Packet::default()); - for p in packets.packets.iter_mut() { + packet_batch.packets.reserve_and_pin(NUM_PACKETS); + packet_batch.packets.resize(NUM_PACKETS, Packet::default()); + for p in packet_batch.packets.iter_mut() { let shred = Shred::new_from_data( slot, 0xc0de, @@ -41,25 +41,25 @@ fn bench_sigverify_shreds_sign_gpu(bencher: &mut Bencher) { ); shred.copy_to_packet(p); } - let mut batch = vec![packets; NUM_BATCHES]; + let mut batches = vec![packet_batch; NUM_BATCHES]; let keypair = Keypair::new(); let pinned_keypair = sign_shreds_gpu_pinned_keypair(&keypair, &recycler_cache); let pinned_keypair = Some(Arc::new(pinned_keypair)); //warmup for _ in 0..100 { - sign_shreds_gpu(&keypair, &pinned_keypair, &mut batch, &recycler_cache); + sign_shreds_gpu(&keypair, &pinned_keypair, &mut batches, &recycler_cache); } bencher.iter(|| { - sign_shreds_gpu(&keypair, &pinned_keypair, &mut batch, &recycler_cache); + sign_shreds_gpu(&keypair, &pinned_keypair, &mut batches, &recycler_cache); }) } #[bench] fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) { - let mut packets = Packets::default(); + let mut packet_batch = PacketBatch::default(); let slot = 0xdead_c0de; - packets.packets.resize(NUM_PACKETS, Packet::default()); - for p in packets.packets.iter_mut() { + packet_batch.packets.resize(NUM_PACKETS, Packet::default()); + for p in packet_batch.packets.iter_mut() { let shred = Shred::new_from_data( slot, 0xc0de, @@ -73,9 +73,9 @@ fn bench_sigverify_shreds_sign_cpu(bencher: &mut Bencher) { ); shred.copy_to_packet(p); } - let mut batch = vec![packets; NUM_BATCHES]; + let mut batches = vec![packet_batch; NUM_BATCHES]; let keypair = Keypair::new(); bencher.iter(|| { - sign_shreds_cpu(&keypair, &mut batch); + sign_shreds_cpu(&keypair, &mut batches); }) } 
diff --git a/ledger/src/ancestor_iterator.rs b/ledger/src/ancestor_iterator.rs index fbc27653776475..8e723be5eaa0be 100644 --- a/ledger/src/ancestor_iterator.rs +++ b/ledger/src/ancestor_iterator.rs @@ -11,8 +11,8 @@ pub struct AncestorIterator<'a> { impl<'a> AncestorIterator<'a> { pub fn new(start_slot: Slot, blockstore: &'a Blockstore) -> Self { let current = blockstore.meta(start_slot).unwrap().and_then(|slot_meta| { - if slot_meta.is_parent_set() && start_slot != 0 { - Some(slot_meta.parent_slot) + if start_slot != 0 { + slot_meta.parent_slot } else { None } @@ -37,13 +37,11 @@ impl<'a> Iterator for AncestorIterator<'a> { let current = self.current; current.map(|slot| { if slot != 0 { - self.current = self.blockstore.meta(slot).unwrap().and_then(|slot_meta| { - if slot_meta.is_parent_set() { - Some(slot_meta.parent_slot) - } else { - None - } - }); + self.current = self + .blockstore + .meta(slot) + .unwrap() + .and_then(|slot_meta| slot_meta.parent_slot); } else { self.current = None; } diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs index 3c82f70cf4c20d..f2cd775fb12b86 100644 --- a/ledger/src/blockstore.rs +++ b/ledger/src/blockstore.rs @@ -10,11 +10,10 @@ use { IteratorMode, LedgerColumn, Result, WriteBatch, }, blockstore_meta::*, - erasure::ErasureConfig, leader_schedule_cache::LeaderScheduleCache, next_slots_iterator::NextSlotsIterator, shred::{ - Result as ShredResult, Shred, ShredType, Shredder, MAX_DATA_SHREDS_PER_FEC_BLOCK, + ErasureSetId, Result as ShredResult, Shred, ShredId, ShredType, Shredder, SHRED_PAYLOAD_SIZE, }, }, @@ -96,6 +95,12 @@ pub type CompletedSlotsSender = SyncSender>; pub type CompletedSlotsReceiver = Receiver>; type CompletedRanges = Vec<(u32, u32)>; +#[derive(Default)] +pub struct SignatureInfosForAddress { + pub infos: Vec, + pub found_before: bool, +} + #[derive(Clone, Copy)] pub enum PurgeType { Exact, @@ -552,8 +557,8 @@ impl Blockstore { false } - pub fn erasure_meta(&self, slot: Slot, set_index: u64) -> 
Result> { - self.erasure_meta_cf.get((slot, set_index)) + fn erasure_meta(&self, erasure_set: ErasureSetId) -> Result> { + self.erasure_meta_cf.get(erasure_set.store_key()) } pub fn orphan(&self, slot: Slot) -> Result> { @@ -630,14 +635,15 @@ impl Blockstore { index: &'a Index, slot: Slot, erasure_meta: &'a ErasureMeta, - prev_inserted_datas: &'a mut HashMap<(Slot, /*shred index:*/ u64), Shred>, + prev_inserted_shreds: &'a HashMap, data_cf: &'a LedgerColumn, ) -> impl Iterator + 'a { erasure_meta.data_shreds_indices().filter_map(move |i| { - if let Some(shred) = prev_inserted_datas.remove(&(slot, i)) { - return Some(shred); + let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Data); + if let Some(shred) = prev_inserted_shreds.get(&key) { + return Some(shred.clone()); } - if !index.data().is_present(i) { + if !index.data().contains(i) { return None; } match data_cf.get_bytes((slot, i)).unwrap() { @@ -651,17 +657,18 @@ impl Blockstore { } fn get_recovery_coding_shreds<'a>( - index: &'a mut Index, + index: &'a Index, slot: Slot, erasure_meta: &'a ErasureMeta, - prev_inserted_codes: &'a HashMap<(Slot, /*shred index:*/ u64), Shred>, + prev_inserted_shreds: &'a HashMap, code_cf: &'a LedgerColumn, ) -> impl Iterator + 'a { erasure_meta.coding_shreds_indices().filter_map(move |i| { - if let Some(shred) = prev_inserted_codes.get(&(slot, i)) { + let key = ShredId::new(slot, u32::try_from(i).unwrap(), ShredType::Code); + if let Some(shred) = prev_inserted_shreds.get(&key) { return Some(shred.clone()); } - if !index.coding().is_present(i) { + if !index.coding().contains(i) { return None; } match code_cf.get_bytes((slot, i)).unwrap() { @@ -677,24 +684,28 @@ impl Blockstore { fn recover_shreds( index: &mut Index, erasure_meta: &ErasureMeta, - prev_inserted_datas: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>, - prev_inserted_codes: &HashMap<(Slot, /*shred index:*/ u64), Shred>, + prev_inserted_shreds: &HashMap, recovered_data_shreds: &mut Vec, data_cf: 
&LedgerColumn, code_cf: &LedgerColumn, ) { // Find shreds for this erasure set and try recovery let slot = index.slot; - let mut available_shreds: Vec<_> = - Self::get_recovery_data_shreds(index, slot, erasure_meta, prev_inserted_datas, data_cf) - .collect(); - available_shreds.extend(Self::get_recovery_coding_shreds( + let available_shreds: Vec<_> = Self::get_recovery_data_shreds( index, slot, erasure_meta, - prev_inserted_codes, + prev_inserted_shreds, + data_cf, + ) + .chain(Self::get_recovery_coding_shreds( + index, + slot, + erasure_meta, + prev_inserted_shreds, code_cf, - )); + )) + .collect(); if let Ok(mut result) = Shredder::try_recovery(available_shreds) { Self::submit_metrics(slot, erasure_meta, true, "complete".into(), result.len()); recovered_data_shreds.append(&mut result); @@ -726,10 +737,9 @@ impl Blockstore { fn try_shred_recovery( db: &Database, - erasure_metas: &HashMap<(Slot, /*fec set index:*/ u64), ErasureMeta>, + erasure_metas: &HashMap, index_working_set: &mut HashMap, - prev_inserted_datas: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>, - prev_inserted_codes: &HashMap<(Slot, /*shred index:*/ u64), Shred>, + prev_inserted_shreds: &HashMap, ) -> Vec { let data_cf = db.column::(); let code_cf = db.column::(); @@ -739,7 +749,8 @@ impl Blockstore { // 2. For new data shreds, check if an erasure set exists. If not, don't try recovery // 3. Before trying recovery, check if enough number of shreds have been received // 3a. 
Enough number of shreds = (#data + #coding shreds) > erasure.num_data - for (&(slot, _fec_set_index), erasure_meta) in erasure_metas.iter() { + for (erasure_set, erasure_meta) in erasure_metas.iter() { + let slot = erasure_set.slot(); let index_meta_entry = index_working_set.get_mut(&slot).expect("Index"); let index = &mut index_meta_entry.index; match erasure_meta.status(index) { @@ -747,8 +758,7 @@ impl Blockstore { Self::recover_shreds( index, erasure_meta, - prev_inserted_datas, - prev_inserted_codes, + prev_inserted_shreds, &mut recovered_data_shreds, &data_cf, &code_cf, @@ -794,8 +804,7 @@ impl Blockstore { let db = &*self.db; let mut write_batch = db.batch()?; - let mut just_inserted_coding_shreds = HashMap::new(); - let mut just_inserted_data_shreds = HashMap::new(); + let mut just_inserted_shreds = HashMap::with_capacity(shreds.len()); let mut erasure_metas = HashMap::new(); let mut slot_meta_working_set = HashMap::new(); let mut index_working_set = HashMap::new(); @@ -819,7 +828,7 @@ impl Blockstore { &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, - &mut just_inserted_data_shreds, + &mut just_inserted_shreds, &mut index_meta_time, is_trusted, handle_duplicate, @@ -847,7 +856,7 @@ impl Blockstore { &mut erasure_metas, &mut index_working_set, &mut write_batch, - &mut just_inserted_coding_shreds, + &mut just_inserted_shreds, &mut index_meta_time, handle_duplicate, is_trusted, @@ -866,8 +875,7 @@ impl Blockstore { db, &erasure_metas, &mut index_working_set, - &mut just_inserted_data_shreds, - &just_inserted_coding_shreds, + &just_inserted_shreds, ); metrics.num_recovered += recovered_data_shreds.len(); @@ -886,7 +894,7 @@ impl Blockstore { &mut index_working_set, &mut slot_meta_working_set, &mut write_batch, - &mut just_inserted_data_shreds, + &mut just_inserted_shreds, &mut index_meta_time, is_trusted, &handle_duplicate, @@ -939,8 +947,8 @@ impl Blockstore { &mut write_batch, )?; - for ((slot, set_index), erasure_meta) in erasure_metas 
{ - write_batch.put::((slot, set_index), &erasure_meta)?; + for (erasure_set, erasure_meta) in erasure_metas { + write_batch.put::(erasure_set.store_key(), &erasure_meta)?; } for (&slot, index_working_set_entry) in index_working_set.iter() { @@ -1014,6 +1022,8 @@ impl Blockstore { } fn erasure_mismatch(shred1: &Shred, shred2: &Shred) -> bool { + // TODO should also compare first-coding-index once position field is + // populated across cluster. shred1.coding_header.num_coding_shreds != shred2.coding_header.num_coding_shreds || shred1.coding_header.num_data_shreds != shred2.coding_header.num_data_shreds } @@ -1022,10 +1032,10 @@ impl Blockstore { fn check_insert_coding_shred( &self, shred: Shred, - erasure_metas: &mut HashMap<(Slot, /*fec set index:*/ u64), ErasureMeta>, + erasure_metas: &mut HashMap, index_working_set: &mut HashMap, write_batch: &mut WriteBatch, - just_received_coding_shreds: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>, + just_received_shreds: &mut HashMap, index_meta_time: &mut u64, handle_duplicate: &F, is_trusted: bool, @@ -1047,7 +1057,7 @@ impl Blockstore { // So, all coding shreds in a given FEC block will have the same set index if !is_trusted { - if index_meta.coding().is_present(shred_index) { + if index_meta.coding().contains(shred_index) { metrics.num_coding_shreds_exists += 1; handle_duplicate(shred); return false; @@ -1059,27 +1069,22 @@ impl Blockstore { } } - let set_index = u64::from(shred.common_header.fec_set_index); - let erasure_config = ErasureConfig::new( - shred.coding_header.num_data_shreds as usize, - shred.coding_header.num_coding_shreds as usize, - ); - - let erasure_meta = erasure_metas.entry((slot, set_index)).or_insert_with(|| { - self.erasure_meta(slot, set_index) + let erasure_set = shred.erasure_set(); + let erasure_meta = erasure_metas.entry(erasure_set).or_insert_with(|| { + self.erasure_meta(erasure_set) .expect("Expect database get to succeed") - .unwrap_or_else(|| ErasureMeta::new(set_index, 
erasure_config)) + .unwrap_or_else(|| ErasureMeta::from_coding_shred(&shred).unwrap()) }); // TODO: handle_duplicate is not invoked and so duplicate shreds are // not gossiped to the rest of cluster. - if erasure_config != erasure_meta.config() { + if !erasure_meta.check_coding_shred(&shred) { metrics.num_coding_shreds_invalid_erasure_config += 1; let conflicting_shred = self.find_conflicting_coding_shred( &shred, slot, erasure_meta, - just_received_coding_shreds, + just_received_shreds, ); if let Some(conflicting_shred) = conflicting_shred { if self @@ -1095,8 +1100,8 @@ impl Blockstore { // ToDo: This is a potential slashing condition warn!("Received multiple erasure configs for the same erasure set!!!"); warn!( - "Slot: {}, shred index: {}, set_index: {}, is_duplicate: {}, stored config: {:#?}, new config: {:#?}", - slot, shred.index(), set_index, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config(), erasure_config + "Slot: {}, shred index: {}, erasure_set: {:?}, is_duplicate: {}, stored config: {:#?}, new config: {:#?}", + slot, shred.index(), erasure_set, self.has_duplicate_shreds_in_slot(slot), erasure_meta.config(), shred.coding_header, ); return false; @@ -1118,8 +1123,7 @@ impl Blockstore { metrics.num_inserted += 1; } - if let HashMapEntry::Vacant(entry) = just_received_coding_shreds.entry((slot, shred_index)) - { + if let HashMapEntry::Vacant(entry) = just_received_shreds.entry(shred.id()) { metrics.num_coding_shreds_inserted += 1; entry.insert(shred); } @@ -1132,41 +1136,38 @@ impl Blockstore { shred: &Shred, slot: Slot, erasure_meta: &ErasureMeta, - just_received_coding_shreds: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>, + just_received_shreds: &HashMap, ) -> Option> { // Search for the shred which set the initial erasure config, either inserted, - // or in the current batch in just_received_coding_shreds. - let mut conflicting_shred = None; + // or in the current batch in just_received_shreds. 
for coding_index in erasure_meta.coding_shreds_indices() { let maybe_shred = self.get_coding_shred(slot, coding_index); if let Ok(Some(shred_data)) = maybe_shred { let potential_shred = Shred::new_from_serialized_shred(shred_data).unwrap(); if Self::erasure_mismatch(&potential_shred, shred) { - conflicting_shred = Some(potential_shred.payload); + return Some(potential_shred.payload); } - break; - } else if let Some(potential_shred) = - just_received_coding_shreds.get(&(slot, coding_index)) - { + } else if let Some(potential_shred) = { + let key = ShredId::new(slot, u32::try_from(coding_index).unwrap(), ShredType::Code); + just_received_shreds.get(&key) + } { if Self::erasure_mismatch(potential_shred, shred) { - conflicting_shred = Some(potential_shred.payload.clone()); + return Some(potential_shred.payload.clone()); } - break; } } - - conflicting_shred + None } #[allow(clippy::too_many_arguments)] fn check_insert_data_shred( &self, shred: Shred, - erasure_metas: &mut HashMap<(Slot, /*fec set index:*/ u64), ErasureMeta>, + erasure_metas: &mut HashMap, index_working_set: &mut HashMap, slot_meta_working_set: &mut HashMap, write_batch: &mut WriteBatch, - just_inserted_data_shreds: &mut HashMap<(Slot, /*shred index:*/ u64), Shred>, + just_inserted_shreds: &mut HashMap, index_meta_time: &mut u64, is_trusted: bool, handle_duplicate: &F, @@ -1187,7 +1188,9 @@ impl Blockstore { &self.db, slot_meta_working_set, slot, - shred.parent().ok_or(InsertDataShredError::InvalidShred)?, + shred + .parent() + .map_err(|_| InsertDataShredError::InvalidShred)?, ); let slot_meta = &mut slot_meta_entry.new_slot_meta.borrow_mut(); @@ -1215,7 +1218,7 @@ impl Blockstore { if !self.should_insert_data_shred( &shred, slot_meta, - just_inserted_data_shreds, + just_inserted_shreds, &self.last_root, leader_schedule, shred_source.clone(), @@ -1224,7 +1227,7 @@ impl Blockstore { } } - let set_index = u64::from(shred.common_header.fec_set_index); + let erasure_set = shred.erasure_set(); let 
newly_completed_data_sets = self.insert_data_shred( slot_meta, index_meta.data_mut(), @@ -1232,11 +1235,11 @@ impl Blockstore { write_batch, shred_source, )?; - just_inserted_data_shreds.insert((slot, shred_index), shred); + just_inserted_shreds.insert(shred.id(), shred); index_meta_working_set_entry.did_insert_occur = true; slot_meta_entry.did_insert_occur = true; - if let HashMapEntry::Vacant(entry) = erasure_metas.entry((slot, set_index)) { - if let Some(meta) = self.erasure_meta(slot, set_index).unwrap() { + if let HashMapEntry::Vacant(entry) = erasure_metas.entry(erasure_set) { + if let Some(meta) = self.erasure_meta(erasure_set).unwrap() { entry.insert(meta); } } @@ -1244,16 +1247,7 @@ impl Blockstore { } fn should_insert_coding_shred(shred: &Shred, last_root: &RwLock) -> bool { - let shred_index = shred.index(); - let fec_set_index = shred.common_header.fec_set_index; - let num_coding_shreds = shred.coding_header.num_coding_shreds as u32; - shred.is_code() - && shred_index >= fec_set_index - && shred_index - fec_set_index < num_coding_shreds - && num_coding_shreds != 0 - && num_coding_shreds <= 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK - && num_coding_shreds - 1 <= u32::MAX - fec_set_index - && shred.slot() > *last_root.read().unwrap() + shred.is_code() && shred.sanitize() && shred.slot() > *last_root.read().unwrap() } fn insert_coding_shred( @@ -1267,12 +1261,12 @@ impl Blockstore { // Assert guaranteed by integrity checks on the shred that happen before // `insert_coding_shred` is called - assert!(shred.is_code() && shred_index >= shred.common_header.fec_set_index as u64); + assert!(shred.is_code() && shred.sanitize()); // Commit step: commit all changes to the mutable structures at once, or none at all. // We don't want only a subset of these changes going through. 
write_batch.put_bytes::((slot, shred_index), &shred.payload)?; - index_meta.coding_mut().set_present(shred_index, true); + index_meta.coding_mut().insert(shred_index); Ok(()) } @@ -1280,16 +1274,17 @@ impl Blockstore { fn is_data_shred_present(shred: &Shred, slot_meta: &SlotMeta, data_index: &ShredIndex) -> bool { let shred_index = u64::from(shred.index()); // Check that the shred doesn't already exist in blockstore - shred_index < slot_meta.consumed || data_index.is_present(shred_index) + shred_index < slot_meta.consumed || data_index.contains(shred_index) } fn get_data_shred_from_just_inserted_or_db<'a>( &'a self, - just_inserted_data_shreds: &'a HashMap<(Slot, /*shred index:*/ u64), Shred>, + just_inserted_shreds: &'a HashMap, slot: Slot, index: u64, ) -> Cow<'a, Vec> { - if let Some(shred) = just_inserted_data_shreds.get(&(slot, index)) { + let key = ShredId::new(slot, u32::try_from(index).unwrap(), ShredType::Data); + if let Some(shred) = just_inserted_shreds.get(&key) { Cow::Borrowed(&shred.payload) } else { // If it doesn't exist in the just inserted set, it must exist in @@ -1302,7 +1297,7 @@ impl Blockstore { &self, shred: &Shred, slot_meta: &SlotMeta, - just_inserted_data_shreds: &HashMap<(Slot, /*shred index:*/ u64), Shred>, + just_inserted_shreds: &HashMap, last_root: &RwLock, leader_schedule: Option<&LeaderScheduleCache>, shred_source: ShredSource, @@ -1354,14 +1349,14 @@ impl Blockstore { // Check that we do not receive shred_index >= than the last_index // for the slot let last_index = slot_meta.last_index; - if shred_index >= last_index { + if last_index.map(|ix| shred_index >= ix).unwrap_or_default() { let leader_pubkey = leader_schedule .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None)); let ending_shred: Cow> = self.get_data_shred_from_just_inserted_or_db( - just_inserted_data_shreds, + just_inserted_shreds, slot, - last_index, + last_index.unwrap(), ); if self @@ -1380,7 +1375,7 @@ impl Blockstore { ( "error", format!( - 
"Leader {:?}, slot {}: received index {} >= slot.last_index {}, shred_source: {:?}", + "Leader {:?}, slot {}: received index {} >= slot.last_index {:?}, shred_source: {:?}", leader_pubkey, slot, shred_index, last_index, shred_source ), String @@ -1395,7 +1390,7 @@ impl Blockstore { .and_then(|leader_schedule| leader_schedule.slot_leader_at(slot, None)); let ending_shred: Cow> = self.get_data_shred_from_just_inserted_or_db( - just_inserted_data_shreds, + just_inserted_shreds, slot, slot_meta.received - 1, ); @@ -1426,7 +1421,12 @@ impl Blockstore { } let last_root = *last_root.read().unwrap(); - verify_shred_slots(slot, slot_meta.parent_slot, last_root) + // TODO Shouldn't this use shred.parent() instead and update + // slot_meta.parent_slot accordingly? + slot_meta + .parent_slot + .map(|parent_slot| verify_shred_slots(slot, parent_slot, last_root)) + .unwrap_or_default() } fn insert_data_shred( @@ -1460,7 +1460,7 @@ impl Blockstore { let new_consumed = if slot_meta.consumed == index { let mut current_index = index + 1; - while data_index.is_present(current_index) { + while data_index.contains(current_index) { current_index += 1; } current_index @@ -1476,7 +1476,7 @@ impl Blockstore { // But only need to store the bytes within data_header.size &shred.payload[..shred.data_header.size as usize], )?; - data_index.set_present(index, true); + data_index.insert(index); let newly_completed_data_sets = update_slot_meta( last_in_slot, last_in_data, @@ -1525,7 +1525,14 @@ impl Blockstore { i64 ), ("slot", slot_meta.slot, i64), - ("last_index", slot_meta.last_index, i64), + ( + "last_index", + slot_meta + .last_index + .and_then(|ix| i64::try_from(ix).ok()) + .unwrap_or(-1), + i64 + ), ("num_repaired", num_repaired, i64), ("num_recovered", num_recovered, i64), ); @@ -1557,7 +1564,8 @@ impl Blockstore { .collect() } - pub fn get_data_shreds( + #[cfg(test)] + fn get_data_shreds( &self, slot: Slot, from_index: u64, @@ -1650,8 +1658,13 @@ impl Blockstore { 0 } }; - let (mut 
data_shreds, mut coding_shreds, _) = - shredder.entries_to_shreds(keypair, ¤t_entries, true, start_index); + let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds( + keypair, + ¤t_entries, + true, // is_last_in_slot + start_index, // next_shred_index + start_index, // next_code_index + ); all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); shredder = Shredder::new( @@ -1670,8 +1683,13 @@ impl Blockstore { } if !slot_entries.is_empty() { - let (mut data_shreds, mut coding_shreds, _) = - shredder.entries_to_shreds(keypair, &slot_entries, is_full_slot, 0); + let (mut data_shreds, mut coding_shreds) = shredder.entries_to_shreds( + keypair, + &slot_entries, + is_full_slot, + 0, // next_shred_index + 0, // next_code_index + ); all_shreds.append(&mut data_shreds); all_shreds.append(&mut coding_shreds); } @@ -1877,8 +1895,12 @@ impl Blockstore { } transaction }); - let parent_slot_entries = self - .get_slot_entries(slot_meta.parent_slot, 0) + let parent_slot_entries = slot_meta + .parent_slot + .and_then(|parent_slot| { + self.get_slot_entries(parent_slot, /*shred_start_index:*/ 0) + .ok() + }) .unwrap_or_default(); if parent_slot_entries.is_empty() && require_previous_blockhash { return Err(BlockstoreError::ParentEntriesUnavailable); @@ -1904,7 +1926,9 @@ impl Blockstore { let block = ConfirmedBlock { previous_blockhash: previous_blockhash.to_string(), blockhash: blockhash.to_string(), - parent_slot: slot_meta.parent_slot, + // If the slot is full it should have parent_slot populated + // from shreds received. 
+ parent_slot: slot_meta.parent_slot.unwrap(), transactions: self .map_transactions_to_statuses(slot, slot_transaction_iterator)?, rewards, @@ -2398,7 +2422,7 @@ impl Blockstore { before: Option, until: Option, limit: usize, - ) -> Result> { + ) -> Result { datapoint_info!( "blockstore-rpc-api", ( @@ -2422,7 +2446,7 @@ impl Blockstore { let transaction_status = self.get_transaction_status(before, &confirmed_unrooted_slots)?; match transaction_status { - None => return Ok(vec![]), + None => return Ok(SignatureInfosForAddress::default()), Some((slot, _)) => { let mut slot_signatures = self.get_sorted_block_signatures(slot)?; if let Some(pos) = slot_signatures.iter().position(|&x| x == before) { @@ -2614,7 +2638,10 @@ impl Blockstore { ) ); - Ok(infos) + Ok(SignatureInfosForAddress { + infos, + found_before: true, // if `before` signature was not found, this method returned early + }) } pub fn read_rewards(&self, index: Slot) -> Result> { @@ -3008,13 +3035,8 @@ impl Blockstore { // Returns the existing shred if `new_shred` is not equal to the existing shred at the // given slot and index as this implies the leader generated two different shreds with // the same slot and index - pub fn is_shred_duplicate( - &self, - slot: u64, - index: u32, - mut payload: Vec, - shred_type: ShredType, - ) -> Option> { + pub fn is_shred_duplicate(&self, shred: ShredId, mut payload: Vec) -> Option> { + let (slot, index, shred_type) = shred.unwrap(); let existing_shred = match shred_type { ShredType::Data => self.get_data_shred(slot, index as u64), ShredType::Code => self.get_coding_shred(slot, index as u64), @@ -3161,7 +3183,7 @@ fn update_completed_data_indexes( .filter(|ix| { let (begin, end) = (ix[0] as u64, ix[1] as u64); let num_shreds = (end - begin) as usize; - received_data_shreds.present_in_bounds(begin..end) == num_shreds + received_data_shreds.range(begin..end).count() == num_shreds }) .map(|ix| (ix[0], ix[1] - 1)) .collect() @@ -3186,20 +3208,11 @@ fn update_slot_meta( 
slot_meta.first_shred_timestamp = timestamp() - slot_time_elapsed; } slot_meta.consumed = new_consumed; - slot_meta.last_index = { - // If the last index in the slot hasn't been set before, then - // set it to this shred index - if slot_meta.last_index == std::u64::MAX { - if is_last_in_slot { - u64::from(index) - } else { - std::u64::MAX - } - } else { - slot_meta.last_index - } - }; - + // If the last index in the slot hasn't been set before, then + // set it to this shred index + if is_last_in_slot && slot_meta.last_index.is_none() { + slot_meta.last_index = Some(u64::from(index)); + } update_completed_data_indexes( is_last_in_slot || is_last_in_data, index, @@ -3244,18 +3257,18 @@ fn get_slot_meta_entry<'a>( // Store a 2-tuple of the metadata (working copy, backup copy) if let Some(mut meta) = meta_cf.get(slot).expect("Expect database get to succeed") { let backup = Some(meta.clone()); - // If parent_slot == std::u64::MAX, then this is one of the orphans inserted + // If parent_slot == None, then this is one of the orphans inserted // during the chaining process, see the function find_slot_meta_in_cached_state() // for details. Slots that are orphans are missing a parent_slot, so we should // fill in the parent now that we know it. if is_orphan(&meta) { - meta.parent_slot = parent_slot; + meta.parent_slot = Some(parent_slot); } SlotMetaWorkingSetEntry::new(Rc::new(RefCell::new(meta)), backup) } else { SlotMetaWorkingSetEntry::new( - Rc::new(RefCell::new(SlotMeta::new(slot, parent_slot))), + Rc::new(RefCell::new(SlotMeta::new(slot, Some(parent_slot)))), None, ) } @@ -3426,8 +3439,8 @@ fn handle_chaining_for_slot( // 1) This is a new slot // 2) slot != 0 // then try to chain this slot to a previous slot - if slot != 0 { - let prev_slot = meta_mut.parent_slot; + if slot != 0 && meta_mut.parent_slot.is_some() { + let prev_slot = meta_mut.parent_slot.unwrap(); // Check if the slot represented by meta_mut is either a new slot or a orphan. 
// In both cases we need to run the chaining logic b/c the parent on the slot was @@ -3521,7 +3534,7 @@ where fn is_orphan(meta: &SlotMeta) -> bool { // If we have no parent, then this is the head of a detached chain of // slots - !meta.is_parent_set() + meta.parent_slot.is_none() } // 1) Chain current_slot to the previous slot defined by prev_slot_meta @@ -3578,7 +3591,13 @@ pub fn create_new_ledger( let shredder = Shredder::new(0, 0, 0, version).unwrap(); let shreds = shredder - .entries_to_shreds(&Keypair::new(), &entries, true, 0) + .entries_to_shreds( + &Keypair::new(), + &entries, + true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ) .0; assert!(shreds.last().unwrap().last_in_slot()); @@ -3807,7 +3826,13 @@ pub fn entries_to_test_shreds( ) -> Vec { Shredder::new(slot, parent_slot, 0, version) .unwrap() - .entries_to_shreds(&Keypair::new(), &entries, is_full_slot, 0) + .entries_to_shreds( + &Keypair::new(), + &entries, + is_full_slot, + 0, // next_shred_index, + 0, // next_code_index + ) .0 } @@ -4037,16 +4062,16 @@ pub mod tests { let num_shreds = shreds_per_slot[i as usize]; assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); if i == num_slots - 1 { assert!(meta.next_slots.is_empty()); } else { assert_eq!(meta.next_slots, vec![i + 1]); } if i == 0 { - assert_eq!(meta.parent_slot, 0); + assert_eq!(meta.parent_slot, Some(0)); } else { - assert_eq!(meta.parent_slot, i - 1); + assert_eq!(meta.parent_slot, Some(i - 1)); } assert_eq!( @@ -4104,7 +4129,7 @@ pub mod tests { let blockstore = Blockstore::open(ledger_path.path()).unwrap(); // Test meta column family - let meta = SlotMeta::new(0, 1); + let meta = SlotMeta::new(0, Some(1)); blockstore.meta_cf.put(0, &meta).unwrap(); let result = blockstore .meta_cf @@ -4261,8 +4286,8 @@ pub mod tests { .expect("Expected new metadata object to exist"); 
assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); - assert_eq!(meta.parent_slot, 0); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.parent_slot, Some(0)); + assert_eq!(meta.last_index, Some(num_shreds - 1)); assert!(meta.next_slots.is_empty()); assert!(meta.is_connected); } @@ -4287,12 +4312,12 @@ pub mod tests { .meta(0) .unwrap() .expect("Expected metadata object to exist"); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); if i != 0 { assert_eq!(result.len(), 0); assert!(meta.consumed == 0 && meta.received == num_shreds as u64); } else { - assert_eq!(meta.parent_slot, 0); + assert_eq!(meta.parent_slot, Some(0)); assert_eq!(result, entries); assert!(meta.consumed == num_shreds as u64 && meta.received == num_shreds as u64); } @@ -4465,9 +4490,9 @@ pub mod tests { } assert_eq!(meta.consumed, 0); if num_shreds % 2 == 0 { - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.last_index, Some(num_shreds - 1)); } else { - assert_eq!(meta.last_index, std::u64::MAX); + assert_eq!(meta.last_index, None); } blockstore.insert_shreds(even_shreds, None, false).unwrap(); @@ -4480,8 +4505,8 @@ pub mod tests { let meta = blockstore.meta(slot).unwrap().unwrap(); assert_eq!(meta.received, num_shreds); assert_eq!(meta.consumed, num_shreds); - assert_eq!(meta.parent_slot, parent_slot); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.parent_slot, Some(parent_slot)); + assert_eq!(meta.last_index, Some(num_shreds - 1)); } } @@ -4734,8 +4759,8 @@ pub mod tests { assert!(s1.next_slots.is_empty()); // Slot 1 is not trunk because slot 0 hasn't been inserted yet assert!(!s1.is_connected); - assert_eq!(s1.parent_slot, 0); - assert_eq!(s1.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s1.parent_slot, Some(0)); + assert_eq!(s1.last_index, Some(shreds_per_slot as u64 - 1)); // 2) Write to the second slot let shreds2 = shreds @@ -4746,16 +4771,16 @@ pub mod tests { 
assert!(s2.next_slots.is_empty()); // Slot 2 is not trunk because slot 0 hasn't been inserted yet assert!(!s2.is_connected); - assert_eq!(s2.parent_slot, 1); - assert_eq!(s2.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s2.parent_slot, Some(1)); + assert_eq!(s2.last_index, Some(shreds_per_slot as u64 - 1)); // Check the first slot again, it should chain to the second slot, // but still isn't part of the trunk let s1 = blockstore.meta(1).unwrap().unwrap(); assert_eq!(s1.next_slots, vec![2]); assert!(!s1.is_connected); - assert_eq!(s1.parent_slot, 0); - assert_eq!(s1.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s1.parent_slot, Some(0)); + assert_eq!(s1.last_index, Some(shreds_per_slot as u64 - 1)); // 3) Write to the zeroth slot, check that every slot // is now part of the trunk @@ -4767,11 +4792,11 @@ pub mod tests { assert_eq!(s.next_slots, vec![i + 1]); } if i == 0 { - assert_eq!(s.parent_slot, 0); + assert_eq!(s.parent_slot, Some(0)); } else { - assert_eq!(s.parent_slot, i - 1); + assert_eq!(s.parent_slot, Some(i - 1)); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); assert!(s.is_connected); } } @@ -4819,10 +4844,10 @@ pub mod tests { let s = blockstore.meta(i as u64).unwrap().unwrap(); if i % 2 == 0 { assert_eq!(s.next_slots, vec![i as u64 + 1]); - assert_eq!(s.parent_slot, std::u64::MAX); + assert_eq!(s.parent_slot, None); } else { assert!(s.next_slots.is_empty()); - assert_eq!(s.parent_slot, i - 1); + assert_eq!(s.parent_slot, Some(i - 1)); } if i == 0 { @@ -4848,11 +4873,11 @@ pub mod tests { } if i == 0 { - assert_eq!(s.parent_slot, 0); + assert_eq!(s.parent_slot, Some(0)); } else { - assert_eq!(s.parent_slot, i - 1); + assert_eq!(s.parent_slot, Some(i - 1)); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); assert!(s.is_connected); } } @@ -4896,12 +4921,12 @@ pub mod tests { } if i == 0 { - 
assert_eq!(s.parent_slot, 0); + assert_eq!(s.parent_slot, Some(0)); } else { - assert_eq!(s.parent_slot, i - 1); + assert_eq!(s.parent_slot, Some(i - 1)); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); // Other than slot 0, no slots should be part of the trunk if i != 0 { @@ -4932,12 +4957,12 @@ pub mod tests { } if i == 0 { - assert_eq!(s.parent_slot, 0); + assert_eq!(s.parent_slot, Some(0)); } else { - assert_eq!(s.parent_slot, i - 1); + assert_eq!(s.parent_slot, Some(i - 1)); } - assert_eq!(s.last_index, shreds_per_slot as u64 - 1); + assert_eq!(s.last_index, Some(shreds_per_slot as u64 - 1)); } } } @@ -5016,7 +5041,7 @@ pub mod tests { (slot - 1) / branching_factor } }; - assert_eq!(slot_meta.parent_slot, slot_parent); + assert_eq!(slot_meta.parent_slot, Some(slot_parent)); let expected_children: HashSet<_> = { if slot >= last_level { @@ -5050,7 +5075,7 @@ pub mod tests { // Slot doesn't exist assert!(blockstore.get_slots_since(&[0]).unwrap().is_empty()); - let mut meta0 = SlotMeta::new(0, 0); + let mut meta0 = SlotMeta::new(0, Some(0)); blockstore.meta_cf.put(0, &meta0).unwrap(); // Slot exists, chains to nothing @@ -5064,7 +5089,7 @@ pub mod tests { assert_eq!(blockstore.get_slots_since(&[0]).unwrap(), expected); assert_eq!(blockstore.get_slots_since(&[0, 1]).unwrap(), expected); - let mut meta3 = SlotMeta::new(3, 1); + let mut meta3 = SlotMeta::new(3, Some(1)); meta3.next_slots = vec![10, 5]; blockstore.meta_cf.put(3, &meta3).unwrap(); let expected: HashMap> = vec![(0, vec![1, 2]), (3, vec![10, 5])] @@ -5191,12 +5216,12 @@ pub mod tests { let meta = blockstore.meta(i).unwrap().unwrap(); assert_eq!(meta.received, 1); - assert_eq!(meta.last_index, 0); + assert_eq!(meta.last_index, Some(0)); if i != 0 { - assert_eq!(meta.parent_slot, i - 1); + assert_eq!(meta.parent_slot, Some(i - 1)); assert_eq!(meta.consumed, 1); } else { - assert_eq!(meta.parent_slot, 0); + assert_eq!(meta.parent_slot, 
Some(0)); assert_eq!(meta.consumed, num_shreds_per_slot); } } @@ -5448,7 +5473,7 @@ pub mod tests { true, // is_last_in_slot 0, // reference_tick shred5.common_header.version, - shred5.common_header.fec_set_index, + shred5.fec_set_index(), ); assert!(blockstore.should_insert_data_shred( &empty_shred, @@ -5500,7 +5525,7 @@ pub mod tests { // Trying to insert a shred with index > the "is_last" shred should fail if shred8.is_data() { - shred8.set_slot(slot_meta.last_index + 1); + shred8.set_slot(slot_meta.last_index.unwrap() + 1); } else { panic!("Shred in unexpected format") } @@ -5554,12 +5579,19 @@ pub mod tests { let blockstore = Blockstore::open(ledger_path.path()).unwrap(); let slot = 1; - let (shred, coding) = Shredder::new_coding_shred_header(slot, 11, 11, 11, 11, 0); + let (shred, coding) = Shredder::new_coding_shred_header( + slot, 11, // index + 11, // fec_set_index + 11, // num_data_shreds + 11, // num_coding_shreds + 8, // position + 0, // version + ); let coding_shred = Shred::new_empty_from_header(shred, DataShredHeader::default(), coding); let mut erasure_metas = HashMap::new(); let mut index_working_set = HashMap::new(); - let mut just_received_coding_shreds = HashMap::new(); + let mut just_received_shreds = HashMap::new(); let mut write_batch = blockstore.db.batch().unwrap(); let mut index_meta_time = 0; assert!(blockstore.check_insert_coding_shred( @@ -5567,7 +5599,7 @@ pub mod tests { &mut erasure_metas, &mut index_working_set, &mut write_batch, - &mut just_received_coding_shreds, + &mut just_received_shreds, &mut index_meta_time, &|_shred| { panic!("no dupes"); @@ -5585,7 +5617,7 @@ pub mod tests { &mut erasure_metas, &mut index_working_set, &mut write_batch, - &mut just_received_coding_shreds, + &mut just_received_shreds, &mut index_meta_time, &|_shred| { counter.fetch_add(1, Ordering::Relaxed); @@ -5604,7 +5636,14 @@ pub mod tests { let last_root = RwLock::new(0); let slot = 1; - let (mut shred, coding) = Shredder::new_coding_shred_header(slot, 
11, 11, 11, 11, 0); + let (mut shred, coding) = Shredder::new_coding_shred_header( + slot, 11, // index + 11, // fec_set_index + 11, // num_data_shreds + 11, // num_coding_shreds + 8, // position + 0, // version + ); let coding_shred = Shred::new_empty_from_header(shred.clone(), DataShredHeader::default(), coding.clone()); @@ -5650,7 +5689,7 @@ pub mod tests { DataShredHeader::default(), coding.clone(), ); - let index = coding_shred.index() - coding_shred.common_header.fec_set_index - 1; + let index = coding_shred.index() - coding_shred.fec_set_index() - 1; coding_shred.set_index(index as u32); assert!(!Blockstore::should_insert_coding_shred( @@ -5680,8 +5719,7 @@ pub mod tests { DataShredHeader::default(), coding.clone(), ); - let num_coding_shreds = - coding_shred.common_header.index - coding_shred.common_header.fec_set_index; + let num_coding_shreds = coding_shred.index() - coding_shred.fec_set_index(); coding_shred.coding_header.num_coding_shreds = num_coding_shreds as u16; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, @@ -5698,7 +5736,9 @@ pub mod tests { coding.clone(), ); coding_shred.common_header.fec_set_index = std::u32::MAX - 1; + coding_shred.coding_header.num_data_shreds = 2; coding_shred.coding_header.num_coding_shreds = 3; + coding_shred.coding_header.position = 1; coding_shred.common_header.index = std::u32::MAX - 1; assert!(!Blockstore::should_insert_coding_shred( &coding_shred, @@ -5749,7 +5789,7 @@ pub mod tests { assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); - assert_eq!(slot_meta.last_index, num_shreds - 1); + assert_eq!(slot_meta.last_index, Some(num_shreds - 1)); assert!(slot_meta.is_full()); let (shreds, _) = make_slot_entries(0, 0, 22); @@ -5758,7 +5798,7 @@ pub mod tests { assert_eq!(slot_meta.consumed, num_shreds); assert_eq!(slot_meta.received, num_shreds); - assert_eq!(slot_meta.last_index, num_shreds - 1); + assert_eq!(slot_meta.last_index, Some(num_shreds - 1)); 
assert!(slot_meta.is_full()); assert!(blockstore.has_duplicate_shreds_in_slot(0)); @@ -6007,10 +6047,7 @@ pub mod tests { .set_roots(vec![slot - 1, slot, slot + 1].iter()) .unwrap(); - let parent_meta = SlotMeta { - parent_slot: std::u64::MAX, - ..SlotMeta::default() - }; + let parent_meta = SlotMeta::default(); blockstore .put_meta_bytes(slot - 1, &serialize(&parent_meta).unwrap()) .unwrap(); @@ -6545,13 +6582,13 @@ pub mod tests { // 2 (root) // | // 3 - let meta0 = SlotMeta::new(0, 0); + let meta0 = SlotMeta::new(0, Some(0)); blockstore.meta_cf.put(0, &meta0).unwrap(); - let meta1 = SlotMeta::new(1, 0); + let meta1 = SlotMeta::new(1, Some(0)); blockstore.meta_cf.put(1, &meta1).unwrap(); - let meta2 = SlotMeta::new(2, 0); + let meta2 = SlotMeta::new(2, Some(0)); blockstore.meta_cf.put(2, &meta2).unwrap(); - let meta3 = SlotMeta::new(3, 2); + let meta3 = SlotMeta::new(3, Some(2)); blockstore.meta_cf.put(3, &meta3).unwrap(); blockstore.set_roots(vec![0, 2].iter()).unwrap(); @@ -6728,13 +6765,13 @@ pub mod tests { let signature2 = Signature::new(&[3u8; 64]); // Insert rooted slots 0..=3 with no fork - let meta0 = SlotMeta::new(0, 0); + let meta0 = SlotMeta::new(0, Some(0)); blockstore.meta_cf.put(0, &meta0).unwrap(); - let meta1 = SlotMeta::new(1, 0); + let meta1 = SlotMeta::new(1, Some(0)); blockstore.meta_cf.put(1, &meta1).unwrap(); - let meta2 = SlotMeta::new(2, 1); + let meta2 = SlotMeta::new(2, Some(1)); blockstore.meta_cf.put(2, &meta2).unwrap(); - let meta3 = SlotMeta::new(3, 2); + let meta3 = SlotMeta::new(3, Some(2)); blockstore.meta_cf.put(3, &meta3).unwrap(); blockstore.set_roots(vec![0, 1, 2, 3].iter()).unwrap(); @@ -7390,7 +7427,7 @@ pub mod tests { let highest_confirmed_root = 8; // Fetch all rooted signatures for address 0 at once... 
- let all0 = blockstore + let sig_infos = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, @@ -7399,6 +7436,8 @@ pub mod tests { usize::MAX, ) .unwrap(); + assert!(sig_infos.found_before); + let all0 = sig_infos.infos; assert_eq!(all0.len(), 12); // Fetch all rooted signatures for address 1 at once... @@ -7410,12 +7449,13 @@ pub mod tests { None, usize::MAX, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(all1.len(), 12); // Fetch all signatures for address 0 individually for i in 0..all0.len() { - let results = blockstore + let sig_infos = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, @@ -7428,6 +7468,8 @@ pub mod tests { 1, ) .unwrap(); + assert!(sig_infos.found_before); + let results = sig_infos.infos; assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } @@ -7449,12 +7491,13 @@ pub mod tests { }, 10, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } - assert!(blockstore + let sig_infos = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, @@ -7462,8 +7505,9 @@ pub mod tests { None, 1, ) - .unwrap() - .is_empty()); + .unwrap(); + assert!(sig_infos.found_before); + assert!(sig_infos.infos.is_empty()); assert!(blockstore .get_confirmed_signatures_for_address2( @@ -7474,6 +7518,7 @@ pub mod tests { 2, ) .unwrap() + .infos .is_empty()); // Fetch all signatures for address 0, three at a time @@ -7491,7 +7536,8 @@ pub mod tests { None, 3, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(results.len(), 3); assert_eq!(results[0], all0[i]); assert_eq!(results[1], all0[i + 1]); @@ -7513,7 +7559,8 @@ pub mod tests { None, 2, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(results.len(), 2); assert_eq!(results[0].slot, results[1].slot); assert!(results[0].signature >= results[1].signature); @@ -7522,7 +7569,7 @@ pub mod tests { } // A search for 
address 0 with `before` and/or `until` signatures from address1 should also work - let results = blockstore + let sig_infos = blockstore .get_confirmed_signatures_for_address2( address0, highest_confirmed_root, @@ -7531,6 +7578,8 @@ pub mod tests { usize::MAX, ) .unwrap(); + assert!(sig_infos.found_before); + let results = sig_infos.infos; // The exact number of results returned is variable, based on the sort order of the // random signatures that are generated assert!(!results.is_empty()); @@ -7543,7 +7592,8 @@ pub mod tests { Some(all1[4].signature), usize::MAX, ) - .unwrap(); + .unwrap() + .infos; assert!(results2.len() < results.len()); // Duplicate all tests using confirmed signatures @@ -7558,7 +7608,8 @@ pub mod tests { None, usize::MAX, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(all0.len(), 14); // Fetch all signatures for address 1 at once... @@ -7570,7 +7621,8 @@ pub mod tests { None, usize::MAX, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(all1.len(), 14); // Fetch all signatures for address 0 individually @@ -7587,7 +7639,8 @@ pub mod tests { None, 1, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } @@ -7609,7 +7662,8 @@ pub mod tests { }, 10, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(results.len(), 1); assert_eq!(results[0], all0[i], "Unexpected result for {}", i); } @@ -7623,6 +7677,7 @@ pub mod tests { 1, ) .unwrap() + .infos .is_empty()); assert!(blockstore @@ -7634,6 +7689,7 @@ pub mod tests { 2, ) .unwrap() + .infos .is_empty()); // Fetch all signatures for address 0, three at a time @@ -7651,7 +7707,8 @@ pub mod tests { None, 3, ) - .unwrap(); + .unwrap() + .infos; if i < 12 { assert_eq!(results.len(), 3); assert_eq!(results[2], all0[i + 2]); @@ -7677,7 +7734,8 @@ pub mod tests { None, 2, ) - .unwrap(); + .unwrap() + .infos; assert_eq!(results.len(), 2); assert_eq!(results[0].slot, results[1].slot); assert!(results[0].signature >= 
results[1].signature); @@ -7694,7 +7752,8 @@ pub mod tests { None, usize::MAX, ) - .unwrap(); + .unwrap() + .infos; // The exact number of results returned is variable, based on the sort order of the // random signatures that are generated assert!(!results.is_empty()); @@ -7707,8 +7766,26 @@ pub mod tests { Some(all1[4].signature), usize::MAX, ) - .unwrap(); + .unwrap() + .infos; assert!(results2.len() < results.len()); + + // Remove signature + blockstore + .address_signatures_cf + .delete((0, address0, 2, all0[0].signature)) + .unwrap(); + let sig_infos = blockstore + .get_confirmed_signatures_for_address2( + address0, + highest_confirmed_root, + Some(all0[0].signature), + None, + usize::MAX, + ) + .unwrap(); + assert!(!sig_infos.found_before); + assert!(sig_infos.infos.is_empty()); } #[test] @@ -8007,8 +8084,13 @@ pub mod tests { let entries = make_slot_entries_with_transactions(num_entries); let leader_keypair = Arc::new(Keypair::new()); let shredder = Shredder::new(slot, parent_slot, 0, 0).unwrap(); - let (data_shreds, coding_shreds, _) = - shredder.entries_to_shreds(&leader_keypair, &entries, true, 0); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &leader_keypair, + &entries, + true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); let genesis_config = create_genesis_config(2).genesis_config; let bank = Arc::new(Bank::new_for_tests(&genesis_config)); @@ -8034,7 +8116,7 @@ pub mod tests { // Test that iterator and individual shred lookup yield same set assert!(blockstore.get_data_shred(slot, index).unwrap().is_some()); // Test that the data index has current shred accounted for - assert!(shred_index.data().is_present(index)); + assert!(shred_index.data().contains(index)); } // Test the data index doesn't have anything extra @@ -8048,7 +8130,7 @@ pub mod tests { // Test that the iterator and individual shred lookup yield same set assert!(blockstore.get_coding_shred(slot, index).unwrap().is_some()); // Test that the 
coding index has current shred accounted for - assert!(shred_index.coding().is_present(index)); + assert!(shred_index.coding().contains(index)); } // Test the data index doesn't have anything extra @@ -8063,9 +8145,20 @@ pub mod tests { let entries2 = make_slot_entries_with_transactions(1); let leader_keypair = Arc::new(Keypair::new()); let shredder = Shredder::new(slot, 0, 0, 0).unwrap(); - let (shreds, _, _) = shredder.entries_to_shreds(&leader_keypair, &entries1, true, 0); - let (duplicate_shreds, _, _) = - shredder.entries_to_shreds(&leader_keypair, &entries2, true, 0); + let (shreds, _) = shredder.entries_to_shreds( + &leader_keypair, + &entries1, + true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index, + ); + let (duplicate_shreds, _) = shredder.entries_to_shreds( + &leader_keypair, + &entries2, + true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); let shred = shreds[0].clone(); let duplicate_shred = duplicate_shreds[0].clone(); let non_duplicate_shred = shred.clone(); @@ -8083,19 +8176,15 @@ pub mod tests { // Check if shreds are duplicated assert_eq!( blockstore.is_shred_duplicate( - slot, - 0, + ShredId::new(slot, /*index:*/ 0, duplicate_shred.shred_type()), duplicate_shred.payload.clone(), - duplicate_shred.shred_type(), ), Some(shred.payload.to_vec()) ); assert!(blockstore .is_shred_duplicate( - slot, - 0, - non_duplicate_shred.payload.clone(), - non_duplicate_shred.shred_type(), + ShredId::new(slot, /*index:*/ 0, non_duplicate_shred.shred_type()), + non_duplicate_shred.payload, ) .is_none()); @@ -8162,7 +8251,7 @@ pub mod tests { let mut shred_index = ShredIndex::default(); for i in 0..10 { - shred_index.set_present(i as u64, true); + shred_index.insert(i as u64); assert_eq!( update_completed_data_indexes(true, i, &shred_index, &mut completed_data_indexes), vec![(i, i)] @@ -8176,21 +8265,21 @@ pub mod tests { let mut completed_data_indexes = BTreeSet::default(); let mut shred_index = ShredIndex::default(); 
- shred_index.set_present(4, true); + shred_index.insert(4); assert!( update_completed_data_indexes(false, 4, &shred_index, &mut completed_data_indexes) .is_empty() ); assert!(completed_data_indexes.is_empty()); - shred_index.set_present(2, true); + shred_index.insert(2); assert!( update_completed_data_indexes(false, 2, &shred_index, &mut completed_data_indexes) .is_empty() ); assert!(completed_data_indexes.is_empty()); - shred_index.set_present(3, true); + shred_index.insert(3); assert!( update_completed_data_indexes(true, 3, &shred_index, &mut completed_data_indexes) .is_empty() @@ -8199,7 +8288,7 @@ pub mod tests { // Inserting data complete shred 1 now confirms the range of shreds [2, 3] // is part of the same data set - shred_index.set_present(1, true); + shred_index.insert(1); assert_eq!( update_completed_data_indexes(true, 1, &shred_index, &mut completed_data_indexes), vec![(2, 3)] @@ -8208,7 +8297,7 @@ pub mod tests { // Inserting data complete shred 0 now confirms the range of shreds [0] // is part of the same data set - shred_index.set_present(0, true); + shred_index.insert(0); assert_eq!( update_completed_data_indexes(true, 0, &shred_index, &mut completed_data_indexes), vec![(0, 0), (1, 1)] @@ -8375,8 +8464,14 @@ pub mod tests { let ledger_path = get_tmp_ledger_path_auto_delete!(); let blockstore = Blockstore::open(ledger_path.path()).unwrap(); - let coding1 = Shredder::generate_coding_shreds(&shreds, false); - let coding2 = Shredder::generate_coding_shreds(&shreds, true); + let coding1 = Shredder::generate_coding_shreds( + &shreds, false, // is_last_in_slot + 0, // next_code_index + ); + let coding2 = Shredder::generate_coding_shreds( + &shreds, true, // is_last_in_slot + 0, // next_code_index + ); for shred in &shreds { info!("shred {:?}", shred); } @@ -8444,8 +8539,8 @@ pub mod tests { assert_eq!(blockstore.get_slot_entries(0, 0).unwrap(), vec![]); assert_eq!(meta.consumed, 0); assert_eq!(meta.received, last_index + 1); - assert_eq!(meta.parent_slot, 
0); - assert_eq!(meta.last_index, last_index); + assert_eq!(meta.parent_slot, Some(0)); + assert_eq!(meta.last_index, Some(last_index)); assert!(!blockstore.is_full(0)); } @@ -8460,8 +8555,8 @@ pub mod tests { let meta = blockstore.meta(0).unwrap().unwrap(); assert_eq!(meta.consumed, num_shreds); assert_eq!(meta.received, num_shreds); - assert_eq!(meta.parent_slot, 0); - assert_eq!(meta.last_index, num_shreds - 1); + assert_eq!(meta.parent_slot, Some(0)); + assert_eq!(meta.last_index, Some(num_shreds - 1)); assert!(blockstore.is_full(0)); assert!(!blockstore.is_dead(0)); } @@ -8508,7 +8603,7 @@ pub mod tests { assert_eq!(meta.consumed, shreds.len() as u64); let shreds_index = blockstore.get_index(slot).unwrap().unwrap(); for i in 0..shreds.len() as u64 { - assert!(shreds_index.data().is_present(i)); + assert!(shreds_index.data().contains(i)); } // Cleanup the slot @@ -8563,10 +8658,12 @@ pub mod tests { std::u8::MAX - even_smaller_last_shred_duplicate.payload[0]; assert!(blockstore .is_shred_duplicate( - slot, - even_smaller_last_shred_duplicate.index(), + ShredId::new( + slot, + even_smaller_last_shred_duplicate.index(), + ShredType::Data + ), even_smaller_last_shred_duplicate.payload.clone(), - ShredType::Data, ) .is_some()); blockstore diff --git a/ledger/src/blockstore_meta.rs b/ledger/src/blockstore_meta.rs index c1b1f4c63494b1..c6ebc6bcdcc55f 100644 --- a/ledger/src/blockstore_meta.rs +++ b/ledger/src/blockstore_meta.rs @@ -1,6 +1,9 @@ use { - crate::erasure::ErasureConfig, - serde::{Deserialize, Serialize}, + crate::{ + erasure::ErasureConfig, + shred::{Shred, ShredType}, + }, + serde::{Deserialize, Deserializer, Serialize, Serializer}, solana_sdk::{clock::Slot, hash::Hash}, std::{ collections::BTreeSet, @@ -24,9 +27,13 @@ pub struct SlotMeta { // The timestamp of the first time a shred was added for this slot pub first_shred_timestamp: u64, // The index of the shred that is flagged as the last shred for this slot. 
- pub last_index: u64, + // None until the shred with LAST_SHRED_IN_SLOT flag is received. + #[serde(with = "serde_compat")] + pub last_index: Option, // The slot height of the block this one derives from. - pub parent_slot: Slot, + // The parent slot of the head of a detached chain of slots is None. + #[serde(with = "serde_compat")] + pub parent_slot: Option, // The list of slots, each of which contains a block that derives // from this one. pub next_slots: Vec, @@ -37,6 +44,27 @@ pub struct SlotMeta { pub completed_data_indexes: BTreeSet, } +// Serde implementation of serialize and deserialize for Option +// where None is represented as u64::MAX; for backward compatibility. +mod serde_compat { + use super::*; + + pub(super) fn serialize(val: &Option, serializer: S) -> Result + where + S: Serializer, + { + val.unwrap_or(u64::MAX).serialize(serializer) + } + + pub(super) fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + { + let val = u64::deserialize(deserializer)?; + Ok((val != u64::MAX).then(|| val)) + } +} + #[derive(Clone, Debug, Default, Deserialize, Serialize, PartialEq)] /// Index recording presence/absence of shreds pub struct Index { @@ -56,9 +84,8 @@ pub struct ShredIndex { pub struct ErasureMeta { /// Which erasure set in the slot this is set_index: u64, - /// Deprecated field. 
- #[serde(rename = "first_coding_index")] - __unused_first_coding_index: u64, + /// First coding index in the FEC set + first_coding_index: u64, /// Size of shards in this erasure set #[serde(rename = "size")] __unused_size: usize, @@ -124,10 +151,10 @@ impl Index { &self.coding } - pub fn data_mut(&mut self) -> &mut ShredIndex { + pub(crate) fn data_mut(&mut self) -> &mut ShredIndex { &mut self.data } - pub fn coding_mut(&mut self) -> &mut ShredIndex { + pub(crate) fn coding_mut(&mut self) -> &mut ShredIndex { &mut self.coding } } @@ -137,71 +164,48 @@ impl ShredIndex { self.index.len() } - pub fn present_in_bounds(&self, bounds: impl RangeBounds) -> usize { - self.index.range(bounds).count() + pub(crate) fn range(&self, bounds: R) -> impl Iterator + where + R: RangeBounds, + { + self.index.range(bounds) } - pub fn is_present(&self, index: u64) -> bool { + pub(crate) fn contains(&self, index: u64) -> bool { self.index.contains(&index) } - pub fn set_present(&mut self, index: u64, presence: bool) { - if presence { - self.index.insert(index); - } else { - self.index.remove(&index); - } - } - - pub fn set_many_present(&mut self, presence: impl IntoIterator) { - for (idx, present) in presence.into_iter() { - self.set_present(idx, present); - } - } - - pub fn largest(&self) -> Option { - self.index.iter().rev().next().copied() + pub(crate) fn insert(&mut self, index: u64) { + self.index.insert(index); } } impl SlotMeta { pub fn is_full(&self) -> bool { - // last_index is std::u64::MAX when it has no information about how + // last_index is None when it has no information about how // many shreds will fill this slot. // Note: A full slot with zero shreds is not possible. 
- if self.last_index == std::u64::MAX { - return false; - } - // Should never happen - if self.consumed > self.last_index + 1 { + if self + .last_index + .map(|ix| self.consumed > ix + 1) + .unwrap_or_default() + { datapoint_error!( "blockstore_error", ( "error", format!( - "Observed a slot meta with consumed: {} > meta.last_index + 1: {}", + "Observed a slot meta with consumed: {} > meta.last_index + 1: {:?}", self.consumed, - self.last_index + 1 + self.last_index.map(|ix| ix + 1), ), String ) ); } - self.consumed == self.last_index + 1 - } - - pub fn known_last_index(&self) -> Option { - if self.last_index == std::u64::MAX { - None - } else { - Some(self.last_index) - } - } - - pub fn is_parent_set(&self) -> bool { - self.parent_slot != std::u64::MAX + Some(self.consumed) == self.last_index.map(|ix| ix + 1) } pub fn clear_unconfirmed_slot(&mut self) { @@ -210,31 +214,56 @@ impl SlotMeta { std::mem::swap(self, &mut new_self); } - pub(crate) fn new(slot: Slot, parent_slot: Slot) -> Self { + pub(crate) fn new(slot: Slot, parent_slot: Option) -> Self { SlotMeta { slot, parent_slot, is_connected: slot == 0, - last_index: std::u64::MAX, ..SlotMeta::default() } } pub(crate) fn new_orphan(slot: Slot) -> Self { - Self::new(slot, std::u64::MAX) + Self::new(slot, /*parent_slot:*/ None) } } impl ErasureMeta { - pub(crate) fn new(set_index: u64, config: ErasureConfig) -> ErasureMeta { - ErasureMeta { - set_index, - config, - __unused_first_coding_index: 0, - __unused_size: 0, + pub(crate) fn from_coding_shred(shred: &Shred) -> Option { + match shred.shred_type() { + ShredType::Data => None, + ShredType::Code => { + let config = ErasureConfig::new( + usize::from(shred.coding_header.num_data_shreds), + usize::from(shred.coding_header.num_coding_shreds), + ); + let first_coding_index = u64::from(shred.first_coding_index()?); + let erasure_meta = ErasureMeta { + set_index: u64::from(shred.fec_set_index()), + config, + first_coding_index, + __unused_size: 0, + }; + 
Some(erasure_meta) + } } } + // Returns true if the erasure fields on the shred + // are consistent with the erasure-meta. + pub(crate) fn check_coding_shred(&self, shred: &Shred) -> bool { + let mut other = match Self::from_coding_shred(shred) { + Some(erasure_meta) => erasure_meta, + None => return false, + }; + other.__unused_size = self.__unused_size; + // Ignore first_coding_index field for now to be backward compatible. + // TODO remove this once cluster is upgraded to always populate + // first_coding_index field. + other.first_coding_index = self.first_coding_index; + self == &other + } + pub(crate) fn config(&self) -> ErasureConfig { self.config } @@ -246,16 +275,23 @@ impl ErasureMeta { pub(crate) fn coding_shreds_indices(&self) -> Range { let num_coding = self.config.num_coding() as u64; - self.set_index..self.set_index + num_coding + // first_coding_index == 0 may imply that the field is not populated. + // self.set_index to be backward compatible. + // TODO remove this once cluster is upgraded to always populate + // first_coding_index field. 
+ let first_coding_index = if self.first_coding_index == 0 { + self.set_index + } else { + self.first_coding_index + }; + first_coding_index..first_coding_index + num_coding } pub(crate) fn status(&self, index: &Index) -> ErasureMetaStatus { use ErasureMetaStatus::*; - let num_coding = index - .coding() - .present_in_bounds(self.coding_shreds_indices()); - let num_data = index.data().present_in_bounds(self.data_shreds_indices()); + let num_coding = index.coding().range(self.coding_shreds_indices()).count(); + let num_data = index.data().range(self.data_shreds_indices()).count(); let (data_missing, num_needed) = ( self.config.num_data().saturating_sub(num_data), @@ -306,7 +342,6 @@ mod test { use { super::*, rand::{seq::SliceRandom, thread_rng}, - std::iter::repeat, }; #[test] @@ -316,7 +351,12 @@ mod test { let set_index = 0; let erasure_config = ErasureConfig::new(8, 16); - let e_meta = ErasureMeta::new(set_index, erasure_config); + let e_meta = ErasureMeta { + set_index, + first_coding_index: set_index, + config: erasure_config, + __unused_size: 0, + }; let mut rng = thread_rng(); let mut index = Index::new(0); @@ -325,35 +365,35 @@ mod test { assert_eq!(e_meta.status(&index), StillNeed(erasure_config.num_data())); - index - .data_mut() - .set_many_present(data_indexes.clone().zip(repeat(true))); + for ix in data_indexes.clone() { + index.data_mut().insert(ix); + } assert_eq!(e_meta.status(&index), DataFull); - index - .coding_mut() - .set_many_present(coding_indexes.clone().zip(repeat(true))); + for ix in coding_indexes.clone() { + index.coding_mut().insert(ix); + } for &idx in data_indexes .clone() .collect::>() .choose_multiple(&mut rng, erasure_config.num_data()) { - index.data_mut().set_present(idx, false); + index.data_mut().index.remove(&idx); assert_eq!(e_meta.status(&index), CanRecover); } - index - .data_mut() - .set_many_present(data_indexes.zip(repeat(true))); + for ix in data_indexes { + index.data_mut().insert(ix); + } for &idx in coding_indexes 
.collect::>() .choose_multiple(&mut rng, erasure_config.num_coding()) { - index.coding_mut().set_present(idx, false); + index.coding_mut().index.remove(&idx); assert_eq!(e_meta.status(&index), DataFull); } diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index 477bc456af4905..e16053d15b8048 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -20,8 +20,8 @@ use { accounts_index::AccountSecondaryIndexes, accounts_update_notifier_interface::AccountsUpdateNotifier, bank::{ - Bank, ExecuteTimings, InnerInstructionsList, RentDebits, TransactionBalancesSet, - TransactionExecutionResult, TransactionLogMessages, TransactionResults, + Bank, ExecuteTimings, RentDebits, TransactionBalancesSet, TransactionExecutionResult, + TransactionResults, }, bank_forks::BankForks, bank_utils, @@ -175,15 +175,14 @@ fn execute_batch( let pre_process_units: u64 = aggregate_total_execution_units(timings); - let (tx_results, balances, inner_instructions, transaction_logs) = - batch.bank().load_execute_and_commit_transactions( - batch, - MAX_PROCESSING_AGE, - transaction_status_sender.is_some(), - transaction_status_sender.is_some(), - transaction_status_sender.is_some(), - timings, - ); + let (tx_results, balances) = batch.bank().load_execute_and_commit_transactions( + batch, + MAX_PROCESSING_AGE, + transaction_status_sender.is_some(), + transaction_status_sender.is_some(), + transaction_status_sender.is_some(), + timings, + ); if bank .feature_set @@ -238,8 +237,6 @@ fn execute_batch( execution_results, balances, token_balances, - inner_instructions, - transaction_logs, rent_debits, ); } @@ -1399,11 +1396,9 @@ pub enum TransactionStatusMessage { pub struct TransactionStatusBatch { pub bank: Arc, pub transactions: Vec, - pub statuses: Vec, + pub execution_results: Vec, pub balances: TransactionBalancesSet, pub token_balances: TransactionTokenBalancesSet, - pub inner_instructions: Option>>, - pub transaction_logs: 
Option>>, pub rent_debits: Vec, } @@ -1418,29 +1413,28 @@ impl TransactionStatusSender { &self, bank: Arc, transactions: Vec, - statuses: Vec, + mut execution_results: Vec, balances: TransactionBalancesSet, token_balances: TransactionTokenBalancesSet, - inner_instructions: Vec>, - transaction_logs: Vec>, rent_debits: Vec, ) { let slot = bank.slot(); - let (inner_instructions, transaction_logs) = if !self.enable_cpi_and_log_storage { - (None, None) - } else { - (Some(inner_instructions), Some(transaction_logs)) - }; + if !self.enable_cpi_and_log_storage { + execution_results.iter_mut().for_each(|execution_result| { + if let TransactionExecutionResult::Executed(details) = execution_result { + details.log_messages.take(); + details.inner_instructions.take(); + } + }); + } if let Err(e) = self .sender .send(TransactionStatusMessage::Batch(TransactionStatusBatch { bank, transactions, - statuses, + execution_results, balances, token_balances, - inner_instructions, - transaction_logs, rent_debits, })) { @@ -3483,8 +3477,6 @@ pub mod tests { .. 
}, _balances, - _inner_instructions, - _log_messages, ) = batch.bank().load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, diff --git a/ledger/src/erasure.rs b/ledger/src/erasure.rs index 546139812135ec..cf4052e41c0ed6 100644 --- a/ledger/src/erasure.rs +++ b/ledger/src/erasure.rs @@ -53,18 +53,18 @@ pub struct ErasureConfig { } impl ErasureConfig { - pub fn new(num_data: usize, num_coding: usize) -> ErasureConfig { + pub(crate) fn new(num_data: usize, num_coding: usize) -> ErasureConfig { ErasureConfig { num_data, num_coding, } } - pub fn num_data(self) -> usize { + pub(crate) fn num_data(self) -> usize { self.num_data } - pub fn num_coding(self) -> usize { + pub(crate) fn num_coding(self) -> usize { self.num_coding } } diff --git a/ledger/src/shred.rs b/ledger/src/shred.rs index 73da3c5da02955..d984c850c9a470 100644 --- a/ledger/src/shred.rs +++ b/ledger/src/shred.rs @@ -69,11 +69,7 @@ use { pubkey::Pubkey, signature::{Keypair, Signature, Signer}, }, - std::{ - cell::RefCell, - convert::{TryFrom, TryInto}, - mem::size_of, - }, + std::{cell::RefCell, convert::TryInto, mem::size_of}, thiserror::Error, }; @@ -161,6 +157,9 @@ pub enum ShredError { "invalid parent offset; parent_offset {parent_offset} must be larger than slot {slot}" )] InvalidParentOffset { slot: Slot, parent_offset: u16 }, + + #[error("invalid payload")] + InvalidPayload, } pub type Result = std::result::Result; @@ -222,8 +221,7 @@ pub struct DataShredHeader { pub struct CodingShredHeader { pub num_data_shreds: u16, pub num_coding_shreds: u16, - #[serde(rename = "position")] - __unused: u16, + pub position: u16, } #[derive(Clone, Debug, PartialEq)] @@ -234,6 +232,35 @@ pub struct Shred { pub payload: Vec, } +/// Tuple which uniquely identifies a shred should it exists. 
+#[derive(Clone, Copy, Eq, Hash, PartialEq)] +pub struct ShredId(Slot, /*shred index:*/ u32, ShredType); + +impl ShredId { + pub(crate) fn new(slot: Slot, index: u32, shred_type: ShredType) -> ShredId { + ShredId(slot, index, shred_type) + } + + pub(crate) fn unwrap(&self) -> (Slot, /*shred index:*/ u32, ShredType) { + (self.0, self.1, self.2) + } +} + +/// Tuple which identifies erasure coding set that the shred belongs to. +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] +pub(crate) struct ErasureSetId(Slot, /*fec_set_index:*/ u32); + +impl ErasureSetId { + pub(crate) fn slot(&self) -> Slot { + self.0 + } + + // Storage key for ErasureMeta in blockstore db. + pub(crate) fn store_key(&self) -> (Slot, /*fec_set_index:*/ u64) { + (self.0, u64::from(self.1)) + } +} + impl Shred { fn deserialize_obj<'de, T>(index: &mut usize, size: usize, buf: &'de [u8]) -> bincode::Result where @@ -341,49 +368,41 @@ impl Shred { let common_header: ShredCommonHeader = Self::deserialize_obj(&mut start, SIZE_OF_COMMON_SHRED_HEADER, &payload)?; - let slot = common_header.slot; // Shreds should be padded out to SHRED_PAYLOAD_SIZE // so that erasure generation/recovery works correctly // But only the data_header.size is stored in blockstore. 
payload.resize(SHRED_PAYLOAD_SIZE, 0); - let shred = match common_header.shred_type { + let (data_header, coding_header) = match common_header.shred_type { ShredType::Code => { let coding_header: CodingShredHeader = Self::deserialize_obj(&mut start, SIZE_OF_CODING_SHRED_HEADER, &payload)?; - Self { - common_header, - data_header: DataShredHeader::default(), - coding_header, - payload, - } + (DataShredHeader::default(), coding_header) } ShredType::Data => { let data_header: DataShredHeader = Self::deserialize_obj(&mut start, SIZE_OF_DATA_SHRED_HEADER, &payload)?; - if u64::from(data_header.parent_offset) > common_header.slot { - return Err(ShredError::InvalidParentOffset { - slot, - parent_offset: data_header.parent_offset, - }); - } - Self { - common_header, - data_header, - coding_header: CodingShredHeader::default(), - payload, - } + (data_header, CodingShredHeader::default()) } }; - - Ok(shred) + let shred = Self { + common_header, + data_header, + coding_header, + payload, + }; + shred + .sanitize() + .then(|| shred) + .ok_or(ShredError::InvalidPayload) } pub fn new_empty_coding( slot: Slot, index: u32, fec_set_index: u32, - num_data: usize, - num_code: usize, + num_data: u16, + num_code: u16, + position: u16, version: u16, ) -> Self { let (header, coding_header) = Shredder::new_coding_shred_header( @@ -392,6 +411,7 @@ impl Shred { fec_set_index, num_data, num_code, + position, version, ); Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header) @@ -443,17 +463,33 @@ impl Shred { ) } + /// Unique identifier for each shred. + pub fn id(&self) -> ShredId { + ShredId(self.slot(), self.index(), self.shred_type()) + } + pub fn slot(&self) -> Slot { self.common_header.slot } - pub fn parent(&self) -> Option { + pub fn parent(&self) -> Result { match self.shred_type() { ShredType::Data => { - let parent_offset = Slot::try_from(self.data_header.parent_offset); - self.slot().checked_sub(parent_offset.ok()?) 
+ let slot = self.slot(); + let parent_offset = Slot::from(self.data_header.parent_offset); + if parent_offset == 0 && slot != 0 { + return Err(ShredError::InvalidParentOffset { + slot, + parent_offset: 0, + }); + } + slot.checked_sub(parent_offset) + .ok_or(ShredError::InvalidParentOffset { + slot, + parent_offset: self.data_header.parent_offset, + }) } - ShredType::Code => None, + ShredType::Code => Err(ShredError::InvalidShredType), } } @@ -461,19 +497,62 @@ impl Shred { self.common_header.index } + pub(crate) fn fec_set_index(&self) -> u32 { + self.common_header.fec_set_index + } + + pub(crate) fn first_coding_index(&self) -> Option { + match self.shred_type() { + ShredType::Data => None, + // TODO should be: self.index() - self.coding_header.position + // once position field is populated. + ShredType::Code => Some(self.fec_set_index()), + } + } + + // Returns true if the shred passes sanity checks. + pub(crate) fn sanitize(&self) -> bool { + self.erasure_block_index().is_some() + && match self.shred_type() { + ShredType::Data => { + self.parent().is_ok() + && usize::from(self.data_header.size) <= self.payload.len() + } + ShredType::Code => { + u32::from(self.coding_header.num_coding_shreds) + <= 8 * MAX_DATA_SHREDS_PER_FEC_BLOCK + } + } + } + pub fn version(&self) -> u16 { self.common_header.version } + // Identifier for the erasure coding set that the shred belongs to. + pub(crate) fn erasure_set(&self) -> ErasureSetId { + ErasureSetId(self.slot(), self.fec_set_index()) + } + // Returns the block index within the erasure coding set. fn erasure_block_index(&self) -> Option { - let fec_set_index = self.common_header.fec_set_index; - let index = self.index().checked_sub(fec_set_index)? 
as usize; + let index = self.index().checked_sub(self.fec_set_index())?; + let index = usize::try_from(index).ok()?; match self.shred_type() { ShredType::Data => Some(index), ShredType::Code => { - let num_data_shreds = self.coding_header.num_data_shreds as usize; - let num_coding_shreds = self.coding_header.num_coding_shreds as usize; + // TODO should use first_coding_index once position field is + // populated. + // Assert that the last shred index in the erasure set does not + // overshoot u32. + self.fec_set_index().checked_add(u32::from( + self.coding_header + .num_data_shreds + .max(self.coding_header.num_coding_shreds) + .checked_sub(1)?, + ))?; + let num_data_shreds = usize::from(self.coding_header.num_data_shreds); + let num_coding_shreds = usize::from(self.coding_header.num_coding_shreds); let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?; let index = index.checked_add(num_data_shreds)?; (index < fec_set_size).then(|| index) @@ -676,9 +755,13 @@ impl Shredder { entries: &[Entry], is_last_in_slot: bool, next_shred_index: u32, - ) -> (Vec, Vec, u32) { + next_code_index: u32, + ) -> ( + Vec, // data shreds + Vec, // coding shreds + ) { let mut stats = ProcessShredsStats::default(); - let (data_shreds, last_shred_index) = self.entries_to_data_shreds( + let data_shreds = self.entries_to_data_shreds( keypair, entries, is_last_in_slot, @@ -686,10 +769,15 @@ impl Shredder { next_shred_index, // fec_set_offset &mut stats, ); - let coding_shreds = - Self::data_shreds_to_coding_shreds(keypair, &data_shreds, is_last_in_slot, &mut stats) - .unwrap(); - (data_shreds, coding_shreds, last_shred_index) + let coding_shreds = Self::data_shreds_to_coding_shreds( + keypair, + &data_shreds, + is_last_in_slot, + next_code_index, + &mut stats, + ) + .unwrap(); + (data_shreds, coding_shreds) } // Each FEC block has maximum MAX_DATA_SHREDS_PER_FEC_BLOCK shreds. @@ -711,7 +799,7 @@ impl Shredder { // Shred index offset at which FEC sets are generated. 
fec_set_offset: u32, process_stats: &mut ProcessShredsStats, - ) -> (Vec, u32) { + ) -> Vec { let mut serialize_time = Measure::start("shred_serialize"); let serialized_shreds = bincode::serialize(entries).expect("Expect to serialize all entries"); @@ -759,13 +847,14 @@ impl Shredder { process_stats.serialize_elapsed += serialize_time.as_us(); process_stats.gen_data_elapsed += gen_data_time.as_us(); - (data_shreds, last_shred_index + 1) + data_shreds } pub fn data_shreds_to_coding_shreds( keypair: &Keypair, data_shreds: &[Shred], is_last_in_slot: bool, + next_code_index: u32, process_stats: &mut ProcessShredsStats, ) -> Result> { if data_shreds.is_empty() { @@ -777,8 +866,26 @@ impl Shredder { thread_pool.borrow().install(|| { data_shreds .par_chunks(MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) - .flat_map(|shred_data_batch| { - Shredder::generate_coding_shreds(shred_data_batch, is_last_in_slot) + .enumerate() + .flat_map(|(i, shred_data_batch)| { + // Assumption here is that, for now, each fec block has + // as many coding shreds as data shreds (except for the + // last one in the slot). + // TODO: tie this more closely with + // generate_coding_shreds. 
+ let next_code_index = next_code_index + .checked_add( + u32::try_from(i) + .unwrap() + .checked_mul(MAX_DATA_SHREDS_PER_FEC_BLOCK) + .unwrap(), + ) + .unwrap(); + Shredder::generate_coding_shreds( + shred_data_batch, + is_last_in_slot, + next_code_index, + ) }) .collect() }) @@ -812,8 +919,9 @@ impl Shredder { slot: Slot, index: u32, fec_set_index: u32, - num_data: usize, - num_code: usize, + num_data_shreds: u16, + num_coding_shreds: u16, + position: u16, version: u16, ) -> (ShredCommonHeader, CodingShredHeader) { let header = ShredCommonHeader { @@ -827,15 +935,19 @@ impl Shredder { ( header, CodingShredHeader { - num_data_shreds: num_data as u16, - num_coding_shreds: num_code as u16, - ..CodingShredHeader::default() + num_data_shreds, + num_coding_shreds, + position, }, ) } /// Generates coding shreds for the data shreds in the current FEC set - pub fn generate_coding_shreds(data: &[Shred], is_last_in_slot: bool) -> Vec { + pub fn generate_coding_shreds( + data: &[Shred], + is_last_in_slot: bool, + next_code_index: u32, + ) -> Vec { const PAYLOAD_ENCODE_SIZE: usize = SHRED_PAYLOAD_SIZE - SIZE_OF_CODING_SHRED_HEADERS; let ShredCommonHeader { slot, @@ -847,7 +959,7 @@ impl Shredder { assert_eq!(fec_set_index, index); assert!(data.iter().all(|shred| shred.common_header.slot == slot && shred.common_header.version == version - && shred.common_header.fec_set_index == fec_set_index)); + && shred.fec_set_index() == fec_set_index)); let num_data = data.len(); let num_coding = if is_last_in_slot { (2 * MAX_DATA_SHREDS_PER_FEC_BLOCK as usize) @@ -865,16 +977,20 @@ impl Shredder { .unwrap() .encode(&data, &mut parity[..]) .unwrap(); + let num_data = u16::try_from(num_data).unwrap(); + let num_coding = u16::try_from(num_coding).unwrap(); parity .iter() .enumerate() .map(|(i, parity)| { + let index = next_code_index + u32::try_from(i).unwrap(); let mut shred = Shred::new_empty_coding( slot, - fec_set_index + i as u32, // shred index + index, fec_set_index, num_data, 
num_coding, + u16::try_from(i).unwrap(), // position version, ); shred.payload[SIZE_OF_CODING_SHRED_HEADERS..].copy_from_slice(parity); @@ -890,7 +1006,7 @@ impl Shredder { Self::verify_consistent_shred_payload_sizes("try_recovery()", &shreds)?; let (slot, fec_set_index) = match shreds.first() { None => return Ok(Vec::default()), - Some(shred) => (shred.slot(), shred.common_header.fec_set_index), + Some(shred) => (shred.slot(), shred.fec_set_index()), }; let (num_data_shreds, num_coding_shreds) = match shreds.iter().find(|shred| shred.is_code()) { @@ -900,9 +1016,9 @@ impl Shredder { shred.coding_header.num_coding_shreds, ), }; - debug_assert!(shreds.iter().all( - |shred| shred.slot() == slot && shred.common_header.fec_set_index == fec_set_index - )); + debug_assert!(shreds + .iter() + .all(|shred| shred.slot() == slot && shred.fec_set_index() == fec_set_index)); debug_assert!(shreds .iter() .filter(|shred| shred.is_code()) @@ -1103,7 +1219,7 @@ pub fn verify_test_data_shred( assert!(shred.is_data()); assert_eq!(shred.index(), index); assert_eq!(shred.slot(), slot); - assert_eq!(shred.parent(), Some(parent)); + assert_eq!(shred.parent().unwrap(), parent); assert_eq!(verify, shred.verify(pk)); if is_last_in_slot { assert!(shred.last_in_slot()); @@ -1218,8 +1334,14 @@ pub mod tests { .saturating_sub(num_expected_data_shreds as usize) .max(num_expected_data_shreds as usize); let start_index = 0; - let (data_shreds, coding_shreds, next_index) = - shredder.entries_to_shreds(&keypair, &entries, true, start_index); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, + &entries, + true, // is_last_in_slot + start_index, // next_shred_index + start_index, // next_code_index + ); + let next_index = data_shreds.last().unwrap().index() + 1; assert_eq!(next_index as u64, num_expected_data_shreds); let mut data_shred_indexes = HashSet::new(); @@ -1288,8 +1410,11 @@ pub mod tests { }) .collect(); - let data_shreds = shredder.entries_to_shreds(&keypair, 
&entries, true, 0).0; - + let (data_shreds, _) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); let deserialized_shred = Shred::new_from_serialized_shred(data_shreds.last().unwrap().payload.clone()).unwrap(); assert_eq!(deserialized_shred, *data_shreds.last().unwrap()); @@ -1311,7 +1436,11 @@ pub mod tests { }) .collect(); - let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0; + let (data_shreds, _) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); data_shreds.iter().for_each(|s| { assert_eq!(s.reference_tick(), 5); assert_eq!(Shred::reference_tick_from_data(&s.payload), 5); @@ -1338,7 +1467,11 @@ pub mod tests { }) .collect(); - let data_shreds = shredder.entries_to_shreds(&keypair, &entries, true, 0).0; + let (data_shreds, _) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); data_shreds.iter().for_each(|s| { assert_eq!(s.reference_tick(), SHRED_TICK_REFERENCE_MASK); assert_eq!( @@ -1371,9 +1504,11 @@ pub mod tests { }) .collect(); - let (data_shreds, coding_shreds, _) = - shredder.entries_to_shreds(&keypair, &entries, true, 0); - + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); for (i, s) in data_shreds.iter().enumerate() { verify_test_data_shred( s, @@ -1420,11 +1555,12 @@ pub mod tests { .collect(); let serialized_entries = bincode::serialize(&entries).unwrap(); - let (data_shreds, coding_shreds, _) = shredder.entries_to_shreds( + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( &keypair, &entries, is_last_in_slot, 0, // next_shred_index + 0, // next_code_index ); let num_coding_shreds = coding_shreds.len(); @@ -1551,8 +1687,11 @@ pub mod tests { // Test5: Try 
recovery/reassembly with non zero index full slot with 3 missing data shreds // and 2 missing coding shreds. Hint: should work let serialized_entries = bincode::serialize(&entries).unwrap(); - let (data_shreds, coding_shreds, _) = - shredder.entries_to_shreds(&keypair, &entries, true, 25); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 25, // next_shred_index, + 25, // next_code_index + ); // We should have 10 shreds now assert_eq!(data_shreds.len(), num_data_shreds); @@ -1636,8 +1775,13 @@ pub mod tests { ) .unwrap(); let next_shred_index = rng.gen_range(1, 1024); - let (data_shreds, coding_shreds, _) = - shredder.entries_to_shreds(&keypair, &[entry], is_last_in_slot, next_shred_index); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, + &[entry], + is_last_in_slot, + next_shred_index, + next_shred_index, // next_code_index + ); let num_data_shreds = data_shreds.len(); let mut shreds = coding_shreds; shreds.extend(data_shreds.iter().cloned()); @@ -1690,8 +1834,11 @@ pub mod tests { }) .collect(); - let (data_shreds, coding_shreds, _next_index) = - shredder.entries_to_shreds(&keypair, &entries, true, 0); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); assert!(!data_shreds .iter() .chain(coding_shreds.iter()) @@ -1739,13 +1886,17 @@ pub mod tests { .collect(); let start_index = 0x12; - let (data_shreds, coding_shreds, _next_index) = - shredder.entries_to_shreds(&keypair, &entries, true, start_index); - + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, + &entries, + true, // is_last_in_slot + start_index, // next_shred_index + start_index, // next_code_index + ); let max_per_block = MAX_DATA_SHREDS_PER_FEC_BLOCK as usize; data_shreds.iter().enumerate().for_each(|(i, s)| { let expected_fec_set_index = start_index + ((i / 
max_per_block) * max_per_block) as u32; - assert_eq!(s.common_header.fec_set_index, expected_fec_set_index); + assert_eq!(s.fec_set_index(), expected_fec_set_index); }); coding_shreds.iter().enumerate().for_each(|(i, s)| { @@ -1753,7 +1904,7 @@ pub mod tests { while expected_fec_set_index as usize > data_shreds.len() { expected_fec_set_index -= max_per_block as u32; } - assert_eq!(s.common_header.fec_set_index, expected_fec_set_index); + assert_eq!(s.fec_set_index(), expected_fec_set_index); }); } @@ -1776,7 +1927,7 @@ pub mod tests { let mut stats = ProcessShredsStats::default(); let start_index = 0x12; - let (data_shreds, _next_index) = shredder.entries_to_data_shreds( + let data_shreds = shredder.entries_to_data_shreds( &keypair, &entries, true, // is_last_in_slot @@ -1786,12 +1937,14 @@ pub mod tests { ); assert!(data_shreds.len() > MAX_DATA_SHREDS_PER_FEC_BLOCK as usize); + let next_code_index = data_shreds[0].index(); (1..=MAX_DATA_SHREDS_PER_FEC_BLOCK as usize).for_each(|count| { let coding_shreds = Shredder::data_shreds_to_coding_shreds( &keypair, &data_shreds[..count], false, // is_last_in_slot + next_code_index, &mut stats, ) .unwrap(); @@ -1800,6 +1953,7 @@ pub mod tests { &keypair, &data_shreds[..count], true, // is_last_in_slot + next_code_index, &mut stats, ) .unwrap(); @@ -1813,6 +1967,7 @@ pub mod tests { &keypair, &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1], false, // is_last_in_slot + next_code_index, &mut stats, ) .unwrap(); @@ -1824,6 +1979,7 @@ pub mod tests { &keypair, &data_shreds[..MAX_DATA_SHREDS_PER_FEC_BLOCK as usize + 1], true, // is_last_in_slot + next_code_index, &mut stats, ) .unwrap(); @@ -1840,12 +1996,13 @@ pub mod tests { shred.copy_to_packet(&mut packet); let shred_res = Shred::new_from_serialized_shred(packet.data.to_vec()); assert_matches!( - shred_res, + shred.parent(), Err(ShredError::InvalidParentOffset { slot: 10, parent_offset: 1000 }) ); + assert_matches!(shred_res, Err(ShredError::InvalidPayload)); } 
#[test] @@ -1882,7 +2039,15 @@ pub mod tests { ); assert_eq!(stats.index_overrun, 4); - let shred = Shred::new_empty_coding(8, 2, 10, 30, 4, 200); + let shred = Shred::new_empty_coding( + 8, // slot + 2, // index + 10, // fec_set_index + 30, // num_data + 4, // num_code + 1, // position + 200, // version + ); shred.copy_to_packet(&mut packet); assert_eq!( Some((8, 2, ShredType::Code)), @@ -1894,7 +2059,15 @@ pub mod tests { assert_eq!(None, get_shred_slot_index_type(&packet, &mut stats)); assert_eq!(1, stats.index_out_of_bounds); - let (header, coding_header) = Shredder::new_coding_shred_header(8, 2, 10, 30, 4, 200); + let (header, coding_header) = Shredder::new_coding_shred_header( + 8, // slot + 2, // index + 10, // fec_set_index + 30, // num_data_shreds + 4, // num_coding_shreds + 3, // position + 200, // version + ); let shred = Shred::new_empty_from_header(header, DataShredHeader::default(), coding_header); shred.copy_to_packet(&mut packet); packet.data[OFFSET_OF_SHRED_TYPE] = u8::MAX; diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs index 0df3d7b6b5cab3..3ad11e073450b3 100644 --- a/ledger/src/sigverify_shreds.rs +++ b/ledger/src/sigverify_shreds.rs @@ -12,10 +12,10 @@ use { solana_metrics::inc_new_counter_debug, solana_perf::{ cuda_runtime::PinnedVec, - packet::{limited_deserialize, Packet, Packets}, + packet::{limited_deserialize, Packet, PacketBatch}, perf_libs, recycler_cache::RecyclerCache, - sigverify::{self, batch_size, TxOffset}, + sigverify::{self, count_packets_in_batches, TxOffset}, }, solana_rayon_threadlimit::get_thread_count, solana_sdk::{ @@ -50,7 +50,7 @@ pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap) let slot_start = sig_end + size_of::(); let slot_end = slot_start + size_of::(); let msg_start = sig_end; - if packet.meta.discard { + if packet.meta.discard() { return Some(0); } trace!("slot start and end {} {}", slot_start, slot_end); @@ -58,7 +58,7 @@ pub fn verify_shred_cpu(packet: &Packet, 
slot_leaders: &HashMap) return Some(0); } let slot: u64 = limited_deserialize(&packet.data[slot_start..slot_end]).ok()?; - let msg_end = if packet.meta.repair { + let msg_end = if packet.meta.repair() { packet.meta.size.saturating_sub(SIZE_OF_NONCE) } else { packet.meta.size @@ -76,22 +76,26 @@ pub fn verify_shred_cpu(packet: &Packet, slot_leaders: &HashMap) Some(1) } -fn verify_shreds_cpu(batches: &[Packets], slot_leaders: &HashMap) -> Vec> { +fn verify_shreds_cpu( + batches: &[PacketBatch], + slot_leaders: &HashMap, +) -> Vec> { use rayon::prelude::*; - let count = batch_size(batches); - debug!("CPU SHRED ECDSA for {}", count); + let packet_count = count_packets_in_batches(batches); + debug!("CPU SHRED ECDSA for {}", packet_count); let rv = SIGVERIFY_THREAD_POOL.install(|| { batches .into_par_iter() - .map(|p| { - p.packets + .map(|batch| { + batch + .packets .par_iter() .map(|p| verify_shred_cpu(p, slot_leaders).unwrap_or(0)) .collect() }) .collect() }); - inc_new_counter_debug!("ed25519_shred_verify_cpu", count); + inc_new_counter_debug!("ed25519_shred_verify_cpu", packet_count); rv } @@ -99,7 +103,7 @@ fn slot_key_data_for_gpu< T: Sync + Sized + Default + std::fmt::Debug + Eq + std::hash::Hash + Clone + Copy + AsRef<[u8]>, >( offset_start: usize, - batches: &[Packets], + batches: &[PacketBatch], slot_keys: &HashMap, recycler_cache: &RecyclerCache, ) -> (PinnedVec, TxOffset, usize) { @@ -108,13 +112,14 @@ fn slot_key_data_for_gpu< let slots: Vec> = SIGVERIFY_THREAD_POOL.install(|| { batches .into_par_iter() - .map(|p| { - p.packets + .map(|batch| { + batch + .packets .iter() .map(|packet| { let slot_start = size_of::() + size_of::(); let slot_end = slot_start + size_of::(); - if packet.meta.size < slot_end || packet.meta.discard { + if packet.meta.size < slot_end || packet.meta.discard() { return std::u64::MAX; } let slot: Option = @@ -173,7 +178,7 @@ fn vec_size_in_packets(keyvec: &PinnedVec) -> usize { } fn resize_vec(keyvec: &mut PinnedVec) -> usize { - 
//HACK: Pubkeys vector is passed along as a `Packets` buffer to the GPU + //HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU //TODO: GPU needs a more opaque interface, which can handle variable sized structures for data //Pad the Pubkeys buffer such that it is bigger than a buffer of Packet sized elems let num_in_packets = (keyvec.len() + (size_of::() - 1)) / size_of::(); @@ -183,7 +188,7 @@ fn resize_vec(keyvec: &mut PinnedVec) -> usize { fn shred_gpu_offsets( mut pubkeys_end: usize, - batches: &[Packets], + batches: &[PacketBatch], recycler_cache: &RecyclerCache, ) -> (TxOffset, TxOffset, TxOffset, Vec>) { let mut signature_offsets = recycler_cache.offsets().allocate("shred_signatures"); @@ -199,7 +204,7 @@ fn shred_gpu_offsets( let sig_start = pubkeys_end; let sig_end = sig_start + size_of::(); let msg_start = sig_end; - let msg_end = if packet.meta.repair { + let msg_end = if packet.meta.repair() { sig_start + packet.meta.size.saturating_sub(SIZE_OF_NONCE) } else { sig_start + packet.meta.size @@ -221,7 +226,7 @@ fn shred_gpu_offsets( } pub fn verify_shreds_gpu( - batches: &[Packets], + batches: &[PacketBatch], slot_leaders: &HashMap, recycler_cache: &RecyclerCache, ) -> Vec> { @@ -233,10 +238,10 @@ pub fn verify_shreds_gpu( let mut elems = Vec::new(); let mut rvs = Vec::new(); - let count = batch_size(batches); + let packet_count = count_packets_in_batches(batches); let (pubkeys, pubkey_offsets, mut num_packets) = slot_key_data_for_gpu(0, batches, slot_leaders, recycler_cache); - //HACK: Pubkeys vector is passed along as a `Packets` buffer to the GPU + //HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU //TODO: GPU needs a more opaque interface, which can handle variable sized structures for data let pubkeys_len = num_packets * size_of::(); trace!("num_packets: {}", num_packets); @@ -251,15 +256,15 @@ pub fn verify_shreds_gpu( num: num_packets as u32, }); - for p in batches { + for batch in batches { 
elems.push(perf_libs::Elems { - elems: p.packets.as_ptr(), - num: p.packets.len() as u32, + elems: batch.packets.as_ptr(), + num: batch.packets.len() as u32, }); let mut v = Vec::new(); - v.resize(p.packets.len(), 0); + v.resize(batch.packets.len(), 0); rvs.push(v); - num_packets += p.packets.len(); + num_packets += batch.packets.len(); } out.resize(signature_offsets.len(), 0); @@ -290,7 +295,7 @@ pub fn verify_shreds_gpu( sigverify::copy_return_values(&v_sig_lens, &out, &mut rvs); - inc_new_counter_debug!("ed25519_shred_verify_gpu", count); + inc_new_counter_debug!("ed25519_shred_verify_gpu", packet_count); rvs } @@ -316,18 +321,18 @@ fn sign_shred_cpu(keypair: &Keypair, packet: &mut Packet) { packet.data[0..sig_end].copy_from_slice(signature.as_ref()); } -pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [Packets]) { +pub fn sign_shreds_cpu(keypair: &Keypair, batches: &mut [PacketBatch]) { use rayon::prelude::*; - let count = batch_size(batches); - debug!("CPU SHRED ECDSA for {}", count); + let packet_count = count_packets_in_batches(batches); + debug!("CPU SHRED ECDSA for {}", packet_count); SIGVERIFY_THREAD_POOL.install(|| { - batches.par_iter_mut().for_each(|p| { - p.packets[..] + batches.par_iter_mut().for_each(|batch| { + batch.packets[..] 
.par_iter_mut() .for_each(|p| sign_shred_cpu(keypair, p)); }); }); - inc_new_counter_debug!("ed25519_shred_verify_cpu", count); + inc_new_counter_debug!("ed25519_shred_verify_cpu", packet_count); } pub fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache) -> PinnedVec { @@ -350,14 +355,14 @@ pub fn sign_shreds_gpu_pinned_keypair(keypair: &Keypair, cache: &RecyclerCache) pub fn sign_shreds_gpu( keypair: &Keypair, pinned_keypair: &Option>>, - batches: &mut [Packets], + batches: &mut [PacketBatch], recycler_cache: &RecyclerCache, ) { let sig_size = size_of::(); let pubkey_size = size_of::(); let api = perf_libs::api(); - let count = batch_size(batches); - if api.is_none() || count < SIGN_SHRED_GPU_MIN || pinned_keypair.is_none() { + let packet_count = count_packets_in_batches(batches); + if api.is_none() || packet_count < SIGN_SHRED_GPU_MIN || pinned_keypair.is_none() { return sign_shreds_cpu(keypair, batches); } let api = api.unwrap(); @@ -370,10 +375,10 @@ pub fn sign_shreds_gpu( //should be zero let mut pubkey_offsets = recycler_cache.offsets().allocate("pubkey offsets"); - pubkey_offsets.resize(count, 0); + pubkey_offsets.resize(packet_count, 0); let mut secret_offsets = recycler_cache.offsets().allocate("secret_offsets"); - secret_offsets.resize(count, pubkey_size as u32); + secret_offsets.resize(packet_count, pubkey_size as u32); trace!("offset: {}", offset); let (signature_offsets, msg_start_offsets, msg_sizes, _v_sig_lens) = @@ -388,14 +393,14 @@ pub fn sign_shreds_gpu( num: num_keypair_packets as u32, }); - for p in batches.iter() { + for batch in batches.iter() { elems.push(perf_libs::Elems { - elems: p.packets.as_ptr(), - num: p.packets.len() as u32, + elems: batch.packets.as_ptr(), + num: batch.packets.len() as u32, }); let mut v = Vec::new(); - v.resize(p.packets.len(), 0); - num_packets += p.packets.len(); + v.resize(batch.packets.len(), 0); + num_packets += batch.packets.len(); } trace!("Starting verify num packets: {}", 
num_packets); @@ -447,7 +452,7 @@ pub fn sign_shreds_gpu( }); }); }); - inc_new_counter_debug!("ed25519_shred_sign_gpu", count); + inc_new_counter_debug!("ed25519_shred_sign_gpu", packet_count); } #[cfg(test)] @@ -506,7 +511,7 @@ pub mod tests { fn run_test_sigverify_shreds_cpu(slot: Slot) { solana_logger::setup(); - let mut batch = [Packets::default()]; + let mut batches = [PacketBatch::default()]; let mut shred = Shred::new_from_data( slot, 0xc0de, @@ -520,15 +525,15 @@ pub mod tests { ); let keypair = Keypair::new(); Shredder::sign_shred(&keypair, &mut shred); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let leader_slots = [(slot, keypair.pubkey().to_bytes())] .iter() .cloned() .collect(); - let rv = verify_shreds_cpu(&batch, &leader_slots); + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![1]]); let wrong_keypair = Keypair::new(); @@ -536,19 +541,19 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_cpu(&batch, &leader_slots); + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![0]]); let leader_slots = HashMap::new(); - let rv = verify_shreds_cpu(&batch, &leader_slots); + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![0]]); let leader_slots = [(slot, keypair.pubkey().to_bytes())] .iter() .cloned() .collect(); - batch[0].packets[0].meta.size = 0; - let rv = verify_shreds_cpu(&batch, &leader_slots); + batches[0].packets[0].meta.size = 0; + let rv = verify_shreds_cpu(&batches, &leader_slots); assert_eq!(rv, vec![vec![0]]); } @@ -561,7 +566,7 @@ pub mod tests { solana_logger::setup(); let recycler_cache = 
RecyclerCache::default(); - let mut batch = [Packets::default()]; + let mut batches = [PacketBatch::default()]; let mut shred = Shred::new_from_data( slot, 0xc0de, @@ -575,9 +580,9 @@ pub mod tests { ); let keypair = Keypair::new(); Shredder::sign_shred(&keypair, &mut shred); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let leader_slots = [ (std::u64::MAX, Pubkey::default().to_bytes()), @@ -586,7 +591,7 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![1]]); let wrong_keypair = Keypair::new(); @@ -597,14 +602,14 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![0]]); let leader_slots = [(std::u64::MAX, [0u8; 32])].iter().cloned().collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![0]]); - batch[0].packets[0].meta.size = 0; + batches[0].packets[0].meta.size = 0; let leader_slots = [ (std::u64::MAX, Pubkey::default().to_bytes()), (slot, keypair.pubkey().to_bytes()), @@ -612,7 +617,7 @@ pub mod tests { .iter() .cloned() .collect(); - let rv = verify_shreds_gpu(&batch, &leader_slots, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &leader_slots, &recycler_cache); assert_eq!(rv, vec![vec![0]]); } @@ -625,11 +630,11 @@ pub mod tests { solana_logger::setup(); let recycler_cache = 
RecyclerCache::default(); - let mut packets = Packets::default(); + let mut packet_batch = PacketBatch::default(); let num_packets = 32; let num_batches = 100; - packets.packets.resize(num_packets, Packet::default()); - for (i, p) in packets.packets.iter_mut().enumerate() { + packet_batch.packets.resize(num_packets, Packet::default()); + for (i, p) in packet_batch.packets.iter_mut().enumerate() { let shred = Shred::new_from_data( slot, 0xc0de, @@ -643,7 +648,7 @@ pub mod tests { ); shred.copy_to_packet(p); } - let mut batch = vec![packets; num_batches]; + let mut batches = vec![packet_batch; num_batches]; let keypair = Keypair::new(); let pinned_keypair = sign_shreds_gpu_pinned_keypair(&keypair, &recycler_cache); let pinned_keypair = Some(Arc::new(pinned_keypair)); @@ -655,14 +660,14 @@ pub mod tests { .cloned() .collect(); //unsigned - let rv = verify_shreds_gpu(&batch, &pubkeys, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &pubkeys, &recycler_cache); assert_eq!(rv, vec![vec![0; num_packets]; num_batches]); //signed - sign_shreds_gpu(&keypair, &pinned_keypair, &mut batch, &recycler_cache); - let rv = verify_shreds_cpu(&batch, &pubkeys); + sign_shreds_gpu(&keypair, &pinned_keypair, &mut batches, &recycler_cache); + let rv = verify_shreds_cpu(&batches, &pubkeys); assert_eq!(rv, vec![vec![1; num_packets]; num_batches]); - let rv = verify_shreds_gpu(&batch, &pubkeys, &recycler_cache); + let rv = verify_shreds_gpu(&batches, &pubkeys, &recycler_cache); assert_eq!(rv, vec![vec![1; num_packets]; num_batches]); } @@ -674,7 +679,7 @@ pub mod tests { fn run_test_sigverify_shreds_sign_cpu(slot: Slot) { solana_logger::setup(); - let mut batch = [Packets::default()]; + let mut batches = [PacketBatch::default()]; let keypair = Keypair::new(); let shred = Shred::new_from_data( slot, @@ -687,9 +692,9 @@ pub mod tests { 0, 0xc0de, ); - batch[0].packets.resize(1, Packet::default()); - batch[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); - 
batch[0].packets[0].meta.size = shred.payload.len(); + batches[0].packets.resize(1, Packet::default()); + batches[0].packets[0].data[0..shred.payload.len()].copy_from_slice(&shred.payload); + batches[0].packets[0].meta.size = shred.payload.len(); let pubkeys = [ (slot, keypair.pubkey().to_bytes()), (std::u64::MAX, Pubkey::default().to_bytes()), @@ -698,11 +703,11 @@ pub mod tests { .cloned() .collect(); //unsigned - let rv = verify_shreds_cpu(&batch, &pubkeys); + let rv = verify_shreds_cpu(&batches, &pubkeys); assert_eq!(rv, vec![vec![0]]); //signed - sign_shreds_cpu(&keypair, &mut batch); - let rv = verify_shreds_cpu(&batch, &pubkeys); + sign_shreds_cpu(&keypair, &mut batches); + let rv = verify_shreds_cpu(&batches, &pubkeys); assert_eq!(rv, vec![vec![1]]); } diff --git a/ledger/tests/shred.rs b/ledger/tests/shred.rs index fe09937734fe8c..7a1cf976d8afdf 100644 --- a/ledger/tests/shred.rs +++ b/ledger/tests/shred.rs @@ -48,8 +48,12 @@ fn test_multi_fec_block_coding() { .collect(); let serialized_entries = bincode::serialize(&entries).unwrap(); - let (data_shreds, coding_shreds, next_index) = - shredder.entries_to_shreds(&keypair, &entries, true, 0); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, &entries, true, // is_last_in_slot + 0, // next_shred_index + 0, // next_code_index + ); + let next_index = data_shreds.last().unwrap().index() + 1; assert_eq!(next_index as usize, num_data_shreds); assert_eq!(data_shreds.len(), num_data_shreds); assert_eq!(coding_shreds.len(), num_data_shreds); @@ -218,8 +222,10 @@ fn setup_different_sized_fec_blocks( let total_num_data_shreds: usize = 2 * num_shreds_per_iter; for i in 0..2 { let is_last = i == 1; - let (data_shreds, coding_shreds, new_next_index) = - shredder.entries_to_shreds(&keypair, &entries, is_last, next_index); + let (data_shreds, coding_shreds) = shredder.entries_to_shreds( + &keypair, &entries, is_last, next_index, // next_shred_index + next_index, // next_code_index + ); for shred 
in &data_shreds { if (shred.index() as usize) == total_num_data_shreds - 1 { assert!(shred.data_complete()); @@ -232,7 +238,7 @@ fn setup_different_sized_fec_blocks( } } assert_eq!(data_shreds.len(), num_shreds_per_iter as usize); - next_index = new_next_index; + next_index = data_shreds.last().unwrap().index() + 1; sort_data_coding_into_fec_sets( data_shreds, coding_shreds, diff --git a/local-cluster/Cargo.toml b/local-cluster/Cargo.toml index e2dfdaa4a520f0..892436ecc3e2ef 100644 --- a/local-cluster/Cargo.toml +++ b/local-cluster/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-local-cluster" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -17,19 +17,19 @@ fs_extra = "1.2.0" log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" -solana-config-program = { path = "../programs/config", version = "=1.9.0" } -solana-core = { path = "../core", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-download-utils = { path = "../download-utils", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-config-program = { path = "../programs/config", version = "=1.9.4" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-download-utils = { path = 
"../download-utils", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } tempfile = "3.2.0" [dev-dependencies] diff --git a/local-cluster/src/cluster_tests.rs b/local-cluster/src/cluster_tests.rs index 1d7892a992175d..8c393e2f24de55 100644 --- a/local-cluster/src/cluster_tests.rs +++ b/local-cluster/src/cluster_tests.rs @@ -441,7 +441,7 @@ pub fn submit_vote_to_cluster_gossip( vec![CrdsValue::new_signed( CrdsData::Vote( 0, - crds_value::Vote::new(node_keypair.pubkey(), vote_tx, timestamp()), + crds_value::Vote::new(node_keypair.pubkey(), vote_tx, timestamp()).unwrap(), ), node_keypair, )], diff --git a/local-cluster/src/local_cluster.rs b/local-cluster/src/local_cluster.rs index ba53a3eb140dbd..18ad89790fdbca 100644 --- a/local-cluster/src/local_cluster.rs +++ b/local-cluster/src/local_cluster.rs @@ -281,7 +281,7 @@ impl LocalCluster { let mut listener_config = safe_clone_config(&config.validator_configs[0]); listener_config.voting_disabled = true; (0..config.num_listeners).for_each(|_| { - cluster.add_validator( + cluster.add_validator_listener( &listener_config, 0, Arc::new(Keypair::new()), @@ -324,11 +324,50 @@ impl LocalCluster { } } + /// Set up validator without voting or staking accounts + pub fn add_validator_listener( + &mut self, + validator_config: &ValidatorConfig, + stake: u64, + validator_keypair: Arc, + voting_keypair: Option>, + socket_addr_space: SocketAddrSpace, + ) -> Pubkey { + self.do_add_validator( + validator_config, 
+ true, + stake, + validator_keypair, + voting_keypair, + socket_addr_space, + ) + } + + /// Set up validator with voting and staking accounts pub fn add_validator( &mut self, validator_config: &ValidatorConfig, stake: u64, validator_keypair: Arc, + voting_keypair: Option>, + socket_addr_space: SocketAddrSpace, + ) -> Pubkey { + self.do_add_validator( + validator_config, + false, + stake, + validator_keypair, + voting_keypair, + socket_addr_space, + ) + } + + fn do_add_validator( + &mut self, + validator_config: &ValidatorConfig, + is_listener: bool, + stake: u64, + validator_keypair: Arc, mut voting_keypair: Option>, socket_addr_space: SocketAddrSpace, ) -> Pubkey { @@ -347,30 +386,28 @@ impl LocalCluster { let contact_info = validator_node.info.clone(); let (ledger_path, _blockhash) = create_new_tmp_ledger!(&self.genesis_config); - if validator_config.voting_disabled { + // Give the validator some lamports to setup vote accounts + if is_listener { // setup as a listener info!("listener {} ", validator_pubkey,); - } else { - // Give the validator some lamports to setup vote accounts - if should_create_vote_pubkey { - let validator_balance = Self::transfer_with_client( - &client, - &self.funding_keypair, - &validator_pubkey, - stake * 2 + 2, - ); - info!( - "validator {} balance {}", - validator_pubkey, validator_balance - ); - Self::setup_vote_and_stake_accounts( - &client, - voting_keypair.as_ref().unwrap(), - &validator_keypair, - stake, - ) - .unwrap(); - } + } else if should_create_vote_pubkey { + let validator_balance = Self::transfer_with_client( + &client, + &self.funding_keypair, + &validator_pubkey, + stake * 2 + 2, + ); + info!( + "validator {} balance {}", + validator_pubkey, validator_balance + ); + Self::setup_vote_and_stake_accounts( + &client, + voting_keypair.as_ref().unwrap(), + &validator_keypair, + stake, + ) + .unwrap(); } let mut config = safe_clone_config(validator_config); diff --git a/local-cluster/tests/local_cluster.rs 
b/local-cluster/tests/local_cluster.rs index 9df61a5001db5b..8c261be7c258a8 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -2759,7 +2759,7 @@ fn test_no_voting() { let meta = ledger.meta(i as u64).unwrap().unwrap(); let parent = meta.parent_slot; let expected_parent = i.saturating_sub(1); - assert_eq!(parent, expected_parent as u64); + assert_eq!(parent, Some(expected_parent as u64)); } } @@ -3190,13 +3190,27 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b let (validator_a_pubkey, validator_b_pubkey, validator_c_pubkey) = (validators[0], validators[1], validators[2]); + // Disable voting on all validators other than validator B to ensure neither of the below two + // scenarios occur: + // 1. If the cluster immediately forks on restart while we're killing validators A and C, + // with Validator B on one side, and `A` and `C` on a heavier fork, it's possible that the lockouts + // on `A` and `C`'s latest votes do not extend past validator B's latest vote. Then validator B + // will be stuck unable to vote, but also unable generate a switching proof to the heavier fork. + // + // 2. Validator A doesn't vote past `next_slot_on_a` before we can kill it. This is essential + // because if validator A votes past `next_slot_on_a`, and then we copy over validator B's ledger + // below only for slots <= `next_slot_on_a`, validator A will not know how it's last vote chains + // to the otehr forks, and may violate switching proofs on restart. 
+ let mut validator_configs = + make_identical_validator_configs(&ValidatorConfig::default(), node_stakes.len()); + + validator_configs[0].voting_disabled = true; + validator_configs[2].voting_disabled = true; + let mut config = ClusterConfig { cluster_lamports: 100_000, - node_stakes: node_stakes.clone(), - validator_configs: make_identical_validator_configs( - &ValidatorConfig::default(), - node_stakes.len(), - ), + node_stakes, + validator_configs, validator_keys: Some(validator_keys), slots_per_epoch, stakers_slot_offset: slots_per_epoch, @@ -3213,9 +3227,23 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b let val_b_ledger_path = cluster.ledger_path(&validator_b_pubkey); let val_c_ledger_path = cluster.ledger_path(&validator_c_pubkey); + info!( + "val_a {} ledger path {:?}", + validator_a_pubkey, val_a_ledger_path + ); + info!( + "val_b {} ledger path {:?}", + validator_b_pubkey, val_b_ledger_path + ); + info!( + "val_c {} ledger path {:?}", + validator_c_pubkey, val_c_ledger_path + ); + // Immediately kill validator A, and C - let validator_a_info = cluster.exit_node(&validator_a_pubkey); - let validator_c_info = cluster.exit_node(&validator_c_pubkey); + info!("Exiting validators A and C"); + let mut validator_a_info = cluster.exit_node(&validator_a_pubkey); + let mut validator_c_info = cluster.exit_node(&validator_c_pubkey); // Step 1: // Let validator B, (D) run for a while. 
@@ -3224,7 +3252,8 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b let elapsed = now.elapsed(); assert!( elapsed <= Duration::from_secs(30), - "LocalCluster nodes failed to log enough tower votes in {} secs", + "Validator B failed to vote on any slot >= {} in {} secs", + next_slot_on_a, elapsed.as_secs() ); sleep(Duration::from_millis(100)); @@ -3269,29 +3298,38 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b } // Step 3: - // Restart A so that it can vote for the slots in B's fork + // Restart A with voting enabled so that it can vote on B's fork + // up to `next_slot_on_a`, thereby optimistcally confirming `next_slot_on_a` info!("Restarting A"); + validator_a_info.config.voting_disabled = false; cluster.restart_node( &validator_a_pubkey, validator_a_info, SocketAddrSpace::Unspecified, ); - info!("Waiting for A to vote"); - let mut last_print = Instant::now(); + info!("Waiting for A to vote on slot descended from slot `next_slot_on_a`"); + let now = Instant::now(); loop { if let Some((last_vote_slot, _)) = last_vote_in_tower(&val_a_ledger_path, &validator_a_pubkey) { if last_vote_slot >= next_slot_on_a { - info!("Validator A has caught up: {}", last_vote_slot); + info!( + "Validator A has caught up and voted on slot: {}", + last_vote_slot + ); break; - } else if last_print.elapsed().as_secs() >= 10 { - info!("Validator A latest vote: {}", last_vote_slot); - last_print = Instant::now(); } } + if now.elapsed().as_secs() >= 30 { + panic!( + "Validator A has not seen optimistic confirmation slot > {} in 30 seconds", + next_slot_on_a + ); + } + sleep(Duration::from_millis(20)); } @@ -3319,6 +3357,7 @@ fn do_test_optimistic_confirmation_violation_with_or_without_tower(with_tower: b // Step 4: // Run validator C only to make it produce and vote on its own fork. 
info!("Restart validator C again!!!"); + validator_c_info.config.voting_disabled = false; cluster.restart_node( &validator_c_pubkey, validator_c_info, diff --git a/log-analyzer/Cargo.toml b/log-analyzer/Cargo.toml index 33f868fb0d8964..4b2cc5d9278357 100644 --- a/log-analyzer/Cargo.toml +++ b/log-analyzer/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-log-analyzer" description = "The solana cluster network analysis tool" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,8 +14,8 @@ byte-unit = "4.0.13" clap = "2.33.1" serde = "1.0.130" serde_json = "1.0.72" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [[bin]] name = "solana-log-analyzer" diff --git a/logger/Cargo.toml b/logger/Cargo.toml index 5a5dfcfa22d7d6..951bfbf4cd6226 100644 --- a/logger/Cargo.toml +++ b/logger/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-logger" -version = "1.9.0" +version = "1.9.4" description = "Solana Logger" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/measure/Cargo.toml b/measure/Cargo.toml index fd77b22c815df3..c7f40cd0d08472 100644 --- a/measure/Cargo.toml +++ b/measure/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-measure" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-measure" readme = "../README.md" @@ -12,7 +12,7 @@ edition = "2021" [dependencies] log = "0.4.14" -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git 
a/merkle-root-bench/Cargo.toml b/merkle-root-bench/Cargo.toml index 8e9831c5e1802c..4f02d86eb505be 100644 --- a/merkle-root-bench/Cargo.toml +++ b/merkle-root-bench/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-merkle-root-bench" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -10,11 +10,11 @@ publish = false [dependencies] log = "0.4.14" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } clap = "2.33.1" [package.metadata.docs.rs] diff --git a/merkle-tree/Cargo.toml b/merkle-tree/Cargo.toml index d93adb8e64731e..eef162ff4c0989 100644 --- a/merkle-tree/Cargo.toml +++ b/merkle-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-merkle-tree" -version = "1.9.0" +version = "1.9.4" description = "Solana Merkle Tree" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-merkle-tree" edition = "2021" [dependencies] -solana-program = { path = "../sdk/program", version = "=1.9.0" } +solana-program = { path = "../sdk/program", version = "=1.9.4" } fast-math = "0.1" # This can go once the BPF toolchain target Rust 1.42.0+ diff --git a/metrics/Cargo.toml b/metrics/Cargo.toml index 1684ac904ff831..ca2dbb34544d9f 100644 --- a/metrics/Cargo.toml +++ b/metrics/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"solana-metrics" -version = "1.9.0" +version = "1.9.4" description = "Solana Metrics" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,7 +15,7 @@ gethostname = "0.2.1" lazy_static = "1.4.0" log = "0.4.14" reqwest = { version = "0.11.6", default-features = false, features = ["blocking", "rustls-tls", "json"] } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } [dev-dependencies] rand = "0.7.0" diff --git a/net-shaper/Cargo.toml b/net-shaper/Cargo.toml index 243d846d3ff920..94ca60c998e6cf 100644 --- a/net-shaper/Cargo.toml +++ b/net-shaper/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-net-shaper" description = "The solana cluster network shaping tool" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,8 +13,8 @@ publish = false clap = "2.33.1" serde = "1.0.130" serde_json = "1.0.72" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } rand = "0.7.0" [[bin]] diff --git a/net-utils/Cargo.toml b/net-utils/Cargo.toml index 5c2497a9882d1b..32ab3c2fb65d02 100644 --- a/net-utils/Cargo.toml +++ b/net-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-net-utils" -version = "1.9.0" +version = "1.9.4" description = "Solana Network Utilities" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -18,9 +18,9 @@ rand = "0.7.0" serde = "1.0.130" serde_derive = "1.0.103" socket2 = "0.4.2" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-logger = { path = 
"../logger", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } tokio = { version = "1", features = ["full"] } url = "2.2.2" diff --git a/notifier/Cargo.toml b/notifier/Cargo.toml index 3aabfbbc2dc56b..b08b3eec88bcb5 100644 --- a/notifier/Cargo.toml +++ b/notifier/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-notifier" -version = "1.9.0" +version = "1.9.4" description = "Solana Notifier" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/perf/Cargo.toml b/perf/Cargo.toml index a1a2737adb810a..c917bf21e51ac4 100644 --- a/perf/Cargo.toml +++ b/perf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-perf" -version = "1.9.0" +version = "1.9.4" description = "Solana Performance APIs" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,11 +19,11 @@ log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" serde = "1.0.130" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } [target."cfg(target_os = \"linux\")".dependencies] caps = "0.5.3" diff --git a/perf/benches/recycler.rs b/perf/benches/recycler.rs index 63410ffc856b8d..0533e4a11eb3a2 100644 --- a/perf/benches/recycler.rs +++ b/perf/benches/recycler.rs @@ -3,7 +3,7 @@ extern crate test; use { - solana_perf::{packet::PacketsRecycler, 
recycler::Recycler}, + solana_perf::{packet::PacketBatchRecycler, recycler::Recycler}, test::Bencher, }; @@ -11,7 +11,7 @@ use { fn bench_recycler(bencher: &mut Bencher) { solana_logger::setup(); - let recycler: PacketsRecycler = Recycler::default(); + let recycler: PacketBatchRecycler = Recycler::default(); for _ in 0..1000 { let _packet = recycler.allocate(""); diff --git a/perf/benches/sigverify.rs b/perf/benches/sigverify.rs index a3211cade62b18..7c60f362b7a1a8 100644 --- a/perf/benches/sigverify.rs +++ b/perf/benches/sigverify.rs @@ -3,7 +3,7 @@ extern crate test; use { - solana_perf::{packet::to_packets_chunked, recycler::Recycler, sigverify, test_tx::test_tx}, + solana_perf::{packet::to_packet_batches, recycler::Recycler, sigverify, test_tx::test_tx}, test::Bencher, }; @@ -12,7 +12,7 @@ fn bench_sigverify(bencher: &mut Bencher) { let tx = test_tx(); // generate packet vector - let mut batches = to_packets_chunked(&std::iter::repeat(tx).take(128).collect::>(), 128); + let mut batches = to_packet_batches(&std::iter::repeat(tx).take(128).collect::>(), 128); let recycler = Recycler::default(); let recycler_out = Recycler::default(); @@ -28,7 +28,7 @@ fn bench_get_offsets(bencher: &mut Bencher) { // generate packet vector let mut batches = - to_packets_chunked(&std::iter::repeat(tx).take(1024).collect::>(), 1024); + to_packet_batches(&std::iter::repeat(tx).take(1024).collect::>(), 1024); let recycler = Recycler::default(); // verify packets diff --git a/perf/src/data_budget.rs b/perf/src/data_budget.rs index 24eb0bb84ec5cc..4c35fc6ce35caa 100644 --- a/perf/src/data_budget.rs +++ b/perf/src/data_budget.rs @@ -10,6 +10,14 @@ pub struct DataBudget { } impl DataBudget { + /// Create a data budget with max bytes, used for tests + pub fn restricted() -> Self { + Self { + bytes: AtomicUsize::default(), + last_timestamp_ms: AtomicU64::new(u64::MAX), + } + } + // If there are enough bytes in the budget, consumes from // the budget and returns true. 
Otherwise returns false. #[must_use] diff --git a/perf/src/packet.rs b/perf/src/packet.rs index 59f9d8f7dfce34..7b2222a0f1cc4b 100644 --- a/perf/src/packet.rs +++ b/perf/src/packet.rs @@ -1,5 +1,5 @@ //! The `packet` module defines data structures and methods to pull data from the network. -pub use solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE}; +pub use solana_sdk::packet::{Meta, Packet, PacketFlags, PACKET_DATA_SIZE}; use { crate::{cuda_runtime::PinnedVec, recycler::Recycler}, bincode::config::Options, @@ -13,13 +13,13 @@ pub const PACKETS_PER_BATCH: usize = 128; pub const NUM_RCVMMSGS: usize = 128; #[derive(Debug, Default, Clone)] -pub struct Packets { +pub struct PacketBatch { pub packets: PinnedVec, } -pub type PacketsRecycler = Recycler>; +pub type PacketBatchRecycler = Recycler>; -impl Packets { +impl PacketBatch { pub fn new(packets: Vec) -> Self { let packets = PinnedVec::from_vec(packets); Self { packets } @@ -27,48 +27,52 @@ impl Packets { pub fn with_capacity(capacity: usize) -> Self { let packets = PinnedVec::with_capacity(capacity); - Packets { packets } + PacketBatch { packets } } pub fn new_unpinned_with_recycler( - recycler: PacketsRecycler, + recycler: PacketBatchRecycler, size: usize, name: &'static str, ) -> Self { let mut packets = recycler.allocate(name); packets.reserve(size); - Packets { packets } + PacketBatch { packets } } - pub fn new_with_recycler(recycler: PacketsRecycler, size: usize, name: &'static str) -> Self { + pub fn new_with_recycler( + recycler: PacketBatchRecycler, + size: usize, + name: &'static str, + ) -> Self { let mut packets = recycler.allocate(name); packets.reserve_and_pin(size); - Packets { packets } + PacketBatch { packets } } pub fn new_with_recycler_data( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, name: &'static str, mut packets: Vec, ) -> Self { - let mut vec = Self::new_with_recycler(recycler.clone(), packets.len(), name); - vec.packets.append(&mut packets); - vec + let mut batch = 
Self::new_with_recycler(recycler.clone(), packets.len(), name); + batch.packets.append(&mut packets); + batch } pub fn new_unpinned_with_recycler_data( - recycler: &PacketsRecycler, + recycler: &PacketBatchRecycler, name: &'static str, mut packets: Vec, ) -> Self { - let mut vec = Self::new_unpinned_with_recycler(recycler.clone(), packets.len(), name); - vec.packets.append(&mut packets); - vec + let mut batch = Self::new_unpinned_with_recycler(recycler.clone(), packets.len(), name); + batch.packets.append(&mut packets); + batch } pub fn set_addr(&mut self, addr: &SocketAddr) { - for m in self.packets.iter_mut() { - m.meta.set_addr(addr); + for p in self.packets.iter_mut() { + p.meta.set_addr(addr); } } @@ -77,32 +81,32 @@ impl Packets { } } -pub fn to_packets_chunked(xs: &[T], chunks: usize) -> Vec { +pub fn to_packet_batches(xs: &[T], chunks: usize) -> Vec { let mut out = vec![]; for x in xs.chunks(chunks) { - let mut p = Packets::with_capacity(x.len()); - p.packets.resize(x.len(), Packet::default()); - for (i, o) in x.iter().zip(p.packets.iter_mut()) { - Packet::populate_packet(o, None, i).expect("serialize request"); + let mut batch = PacketBatch::with_capacity(x.len()); + batch.packets.resize(x.len(), Packet::default()); + for (i, packet) in x.iter().zip(batch.packets.iter_mut()) { + Packet::populate_packet(packet, None, i).expect("serialize request"); } - out.push(p); + out.push(batch); } out } #[cfg(test)] -pub fn to_packets(xs: &[T]) -> Vec { - to_packets_chunked(xs, NUM_PACKETS) +pub fn to_packet_batches_for_tests(xs: &[T]) -> Vec { + to_packet_batches(xs, NUM_PACKETS) } -pub fn to_packets_with_destination( - recycler: PacketsRecycler, +pub fn to_packet_batch_with_destination( + recycler: PacketBatchRecycler, dests_and_data: &[(SocketAddr, T)], -) -> Packets { - let mut out = Packets::new_unpinned_with_recycler( +) -> PacketBatch { + let mut out = PacketBatch::new_unpinned_with_recycler( recycler, dests_and_data.len(), - "to_packets_with_destination", + 
"to_packet_batch_with_destination", ); out.packets.resize(dests_and_data.len(), Packet::default()); for (dest_and_data, o) in dests_and_data.iter().zip(out.packets.iter_mut()) { @@ -143,21 +147,21 @@ mod tests { }; #[test] - fn test_to_packets() { + fn test_to_packet_batches() { let keypair = Keypair::new(); let hash = Hash::new(&[1; 32]); let tx = system_transaction::transfer(&keypair, &keypair.pubkey(), 1, hash); - let rv = to_packets(&[tx.clone(); 1]); + let rv = to_packet_batches_for_tests(&[tx.clone(); 1]); assert_eq!(rv.len(), 1); assert_eq!(rv[0].packets.len(), 1); #[allow(clippy::useless_vec)] - let rv = to_packets(&vec![tx.clone(); NUM_PACKETS]); + let rv = to_packet_batches_for_tests(&vec![tx.clone(); NUM_PACKETS]); assert_eq!(rv.len(), 1); assert_eq!(rv[0].packets.len(), NUM_PACKETS); #[allow(clippy::useless_vec)] - let rv = to_packets(&vec![tx; NUM_PACKETS + 1]); + let rv = to_packet_batches_for_tests(&vec![tx; NUM_PACKETS + 1]); assert_eq!(rv.len(), 2); assert_eq!(rv[0].packets.len(), NUM_PACKETS); assert_eq!(rv[1].packets.len(), 1); @@ -165,9 +169,10 @@ mod tests { #[test] fn test_to_packets_pinning() { - let recycler = PacketsRecycler::default(); + let recycler = PacketBatchRecycler::default(); for i in 0..2 { - let _first_packets = Packets::new_with_recycler(recycler.clone(), i + 1, "first one"); + let _first_packets = + PacketBatch::new_with_recycler(recycler.clone(), i + 1, "first one"); } } } diff --git a/perf/src/recycler.rs b/perf/src/recycler.rs index 154d8832802f6b..a8169ab5564b6a 100644 --- a/perf/src/recycler.rs +++ b/perf/src/recycler.rs @@ -182,7 +182,7 @@ impl RecyclerX { #[cfg(test)] mod tests { - use {super::*, crate::packet::PacketsRecycler, std::iter::repeat_with}; + use {super::*, crate::packet::PacketBatchRecycler, std::iter::repeat_with}; impl Reset for u64 { fn reset(&mut self) { @@ -209,7 +209,7 @@ mod tests { #[test] fn test_recycler_shrink() { let mut rng = rand::thread_rng(); - let recycler = PacketsRecycler::default(); + let 
recycler = PacketBatchRecycler::default(); // Allocate a burst of packets. const NUM_PACKETS: usize = RECYCLER_SHRINK_SIZE * 2; { diff --git a/perf/src/sigverify.rs b/perf/src/sigverify.rs index 6102c69af1d05f..8396bb529b1c6a 100644 --- a/perf/src/sigverify.rs +++ b/perf/src/sigverify.rs @@ -9,7 +9,7 @@ use solana_sdk::transaction::Transaction; use { crate::{ cuda_runtime::PinnedVec, - packet::{Packet, Packets}, + packet::{Packet, PacketBatch, PacketFlags}, perf_libs, recycler::Recycler, }, @@ -114,17 +114,17 @@ fn verify_packet(packet: &mut Packet, reject_non_vote: bool) { let msg_start = packet_offsets.msg_start as usize; // If this packet was already marked as discard, drop it - if packet.meta.discard { + if packet.meta.discard() { return; } if packet_offsets.sig_len == 0 { - packet.meta.discard = true; + packet.meta.set_discard(true); return; } if packet.meta.size <= msg_start { - packet.meta.discard = true; + packet.meta.set_discard(true); return; } @@ -142,15 +142,15 @@ fn verify_packet(packet: &mut Packet, reject_non_vote: bool) { &packet.data[pubkey_start..pubkey_end], &packet.data[msg_start..msg_end], ) { - packet.meta.discard = true; + packet.meta.set_discard(true); return; } // Check for tracer pubkey - if !packet.meta.is_tracer_tx + if !packet.meta.is_tracer_tx() && &packet.data[pubkey_start..pubkey_end] == TRACER_KEY.as_ref() { - packet.meta.is_tracer_tx = true; + packet.meta.flags |= PacketFlags::TRACER_TX; } pubkey_start = pubkey_end; @@ -158,8 +158,8 @@ fn verify_packet(packet: &mut Packet, reject_non_vote: bool) { } } -pub fn batch_size(batches: &[Packets]) -> usize { - batches.iter().map(|p| p.packets.len()).sum() +pub fn count_packets_in_batches(batches: &[PacketBatch]) -> usize { + batches.iter().map(|batch| batch.packets.len()).sum() } // internal function to be unit-tested; should be used only by get_packet_offsets @@ -289,7 +289,7 @@ fn get_packet_offsets( let unsanitized_packet_offsets = do_get_packet_offsets(packet, current_offset); if let 
Ok(offsets) = unsanitized_packet_offsets { check_for_simple_vote_transaction(packet, &offsets, current_offset).ok(); - if !reject_non_vote || packet.meta.is_simple_vote_tx { + if !reject_non_vote || packet.meta.is_simple_vote_tx() { return offsets; } } @@ -360,13 +360,13 @@ fn check_for_simple_vote_transaction( if &packet.data[instruction_program_id_start..instruction_program_id_end] == solana_sdk::vote::program::id().as_ref() { - packet.meta.is_simple_vote_tx = true; + packet.meta.flags |= PacketFlags::SIMPLE_VOTE_TX; } Ok(()) } pub fn generate_offsets( - batches: &mut [Packets], + batches: &mut [PacketBatch], recycler: &Recycler, reject_non_vote: bool, ) -> TxOffsets { @@ -381,9 +381,9 @@ pub fn generate_offsets( msg_sizes.set_pinnable(); let mut current_offset: usize = 0; let mut v_sig_lens = Vec::new(); - batches.iter_mut().for_each(|p| { + batches.iter_mut().for_each(|batch| { let mut sig_lens = Vec::new(); - p.packets.iter_mut().for_each(|packet| { + batch.packets.iter_mut().for_each(|packet| { let packet_offsets = get_packet_offsets(packet, current_offset, reject_non_vote); sig_lens.push(packet_offsets.sig_len); @@ -418,30 +418,32 @@ pub fn generate_offsets( ) } -pub fn ed25519_verify_cpu(batches: &mut [Packets], reject_non_vote: bool) { +pub fn ed25519_verify_cpu(batches: &mut [PacketBatch], reject_non_vote: bool) { use rayon::prelude::*; - let count = batch_size(batches); - debug!("CPU ECDSA for {}", batch_size(batches)); + let packet_count = count_packets_in_batches(batches); + debug!("CPU ECDSA for {}", packet_count); PAR_THREAD_POOL.install(|| { - batches.into_par_iter().for_each(|p| { - p.packets + batches.into_par_iter().for_each(|batch| { + batch + .packets .par_iter_mut() .for_each(|p| verify_packet(p, reject_non_vote)) }) }); - inc_new_counter_debug!("ed25519_verify_cpu", count); + inc_new_counter_debug!("ed25519_verify_cpu", packet_count); } -pub fn ed25519_verify_disabled(batches: &mut [Packets]) { +pub fn ed25519_verify_disabled(batches: &mut 
[PacketBatch]) { use rayon::prelude::*; - let count = batch_size(batches); - debug!("disabled ECDSA for {}", batch_size(batches)); - batches.into_par_iter().for_each(|p| { - p.packets + let packet_count = count_packets_in_batches(batches); + debug!("disabled ECDSA for {}", packet_count); + batches.into_par_iter().for_each(|batch| { + batch + .packets .par_iter_mut() - .for_each(|p| p.meta.discard = false) + .for_each(|p| p.meta.set_discard(false)) }); - inc_new_counter_debug!("ed25519_verify_disabled", count); + inc_new_counter_debug!("ed25519_verify_disabled", packet_count); } pub fn copy_return_values(sig_lens: &[Vec], out: &PinnedVec, rvs: &mut Vec>) { @@ -495,16 +497,16 @@ pub fn get_checked_scalar(scalar: &[u8; 32]) -> Result<[u8; 32], PacketError> { Ok(out) } -pub fn mark_disabled(batches: &mut [Packets], r: &[Vec]) { - batches.iter_mut().zip(r).for_each(|(b, v)| { - b.packets.iter_mut().zip(v).for_each(|(p, f)| { - p.meta.discard = *f == 0; - }) - }); +pub fn mark_disabled(batches: &mut [PacketBatch], r: &[Vec]) { + for (batch, v) in batches.iter_mut().zip(r) { + for (pkt, f) in batch.packets.iter_mut().zip(v) { + pkt.meta.set_discard(*f == 0); + } + } } pub fn ed25519_verify( - batches: &mut [Packets], + batches: &mut [PacketBatch], recycler: &Recycler, recycler_out: &Recycler>, reject_non_vote: bool, @@ -516,21 +518,21 @@ pub fn ed25519_verify( let api = api.unwrap(); use crate::packet::PACKET_DATA_SIZE; - let count = batch_size(batches); + let packet_count = count_packets_in_batches(batches); // micro-benchmarks show GPU time for smallest batch around 15-20ms // and CPU speed for 64-128 sigverifies around 10-20ms. 
64 is a nice // power-of-two number around that accounting for the fact that the CPU // may be busy doing other things while being a real validator // TODO: dynamically adjust this crossover - if count < 64 { + if packet_count < 64 { return ed25519_verify_cpu(batches, reject_non_vote); } let (signature_offsets, pubkey_offsets, msg_start_offsets, msg_sizes, sig_lens) = generate_offsets(batches, recycler, reject_non_vote); - debug!("CUDA ECDSA for {}", batch_size(batches)); + debug!("CUDA ECDSA for {}", packet_count); debug!("allocating out.."); let mut out = recycler_out.allocate("out_buffer"); out.set_pinnable(); @@ -538,15 +540,15 @@ pub fn ed25519_verify( let mut rvs = Vec::new(); let mut num_packets: usize = 0; - for p in batches.iter() { + for batch in batches.iter() { elems.push(perf_libs::Elems { - elems: p.packets.as_ptr(), - num: p.packets.len() as u32, + elems: batch.packets.as_ptr(), + num: batch.packets.len() as u32, }); let mut v = Vec::new(); - v.resize(p.packets.len(), 0); + v.resize(batch.packets.len(), 0); rvs.push(v); - num_packets = num_packets.saturating_add(p.packets.len()); + num_packets = num_packets.saturating_add(batch.packets.len()); } out.resize(signature_offsets.len(), 0); trace!("Starting verify num packets: {}", num_packets); @@ -575,7 +577,7 @@ pub fn ed25519_verify( trace!("done verify"); copy_return_values(&sig_lens, &out, &mut rvs); mark_disabled(batches, &rvs); - inc_new_counter_debug!("ed25519_verify_gpu", count); + inc_new_counter_debug!("ed25519_verify_gpu", packet_count); } #[cfg(test)] @@ -595,9 +597,9 @@ mod tests { use { super::*, crate::{ - packet::{Packet, Packets}, + packet::{Packet, PacketBatch}, sigverify::{self, PacketOffsets}, - test_tx::{test_multisig_tx, test_tx, vote_tx}, + test_tx::{new_test_vote_tx, test_multisig_tx, test_tx}, }, bincode::{deserialize, serialize}, solana_sdk::{ @@ -623,13 +625,13 @@ mod tests { #[test] fn test_mark_disabled() { - let mut batch = Packets::default(); + let mut batch = 
PacketBatch::default(); batch.packets.push(Packet::default()); - let mut batches: Vec = vec![batch]; + let mut batches: Vec = vec![batch]; mark_disabled(&mut batches, &[vec![0]]); - assert!(batches[0].packets[0].meta.discard); + assert!(batches[0].packets[0].meta.discard()); mark_disabled(&mut batches, &[vec![1]]); - assert!(!batches[0].packets[0].meta.discard); + assert!(!batches[0].packets[0].meta.discard()); } #[test] @@ -728,12 +730,12 @@ mod tests { assert_eq!(res, Err(PacketError::InvalidPubkeyLen)); verify_packet(&mut packet, false); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); - packet.meta.discard = false; - let mut batches = generate_packet_vec(&packet, 1, 1); + packet.meta.set_discard(false); + let mut batches = generate_packet_batches(&packet, 1, 1); ed25519_verify(&mut batches); - assert!(batches[0].packets[0].meta.discard); + assert!(batches[0].packets[0].meta.discard()); } #[test] @@ -764,12 +766,12 @@ mod tests { assert_eq!(res, Err(PacketError::InvalidPubkeyLen)); verify_packet(&mut packet, false); - assert!(packet.meta.discard); + assert!(packet.meta.discard()); - packet.meta.discard = false; - let mut batches = generate_packet_vec(&packet, 1, 1); + packet.meta.set_discard(false); + let mut batches = generate_packet_batches(&packet, 1, 1); ed25519_verify(&mut batches); - assert!(batches[0].packets[0].meta.discard); + assert!(batches[0].packets[0].meta.discard()); } #[test] @@ -929,21 +931,21 @@ mod tests { ); } - fn generate_packet_vec( + fn generate_packet_batches( packet: &Packet, num_packets_per_batch: usize, num_batches: usize, - ) -> Vec { + ) -> Vec { // generate packet vector let batches: Vec<_> = (0..num_batches) .map(|_| { - let mut packets = Packets::default(); - packets.packets.resize(0, Packet::default()); + let mut packet_batch = PacketBatch::default(); + packet_batch.packets.resize(0, Packet::default()); for _ in 0..num_packets_per_batch { - packets.packets.push(packet.clone()); + 
packet_batch.packets.push(packet.clone()); } - assert_eq!(packets.packets.len(), num_packets_per_batch); - packets + assert_eq!(packet_batch.packets.len(), num_packets_per_batch); + packet_batch }) .collect(); assert_eq!(batches.len(), num_batches); @@ -960,7 +962,7 @@ mod tests { packet.data[20] = packet.data[20].wrapping_add(10); } - let mut batches = generate_packet_vec(&packet, n, 2); + let mut batches = generate_packet_batches(&packet, n, 2); // verify packets ed25519_verify(&mut batches); @@ -969,11 +971,11 @@ mod tests { let should_discard = modify_data; assert!(batches .iter() - .flat_map(|p| &p.packets) - .all(|p| p.meta.discard == should_discard)); + .flat_map(|batch| &batch.packets) + .all(|p| p.meta.discard() == should_discard)); } - fn ed25519_verify(batches: &mut [Packets]) { + fn ed25519_verify(batches: &mut [PacketBatch]) { let recycler = Recycler::default(); let recycler_out = Recycler::default(); sigverify::ed25519_verify(batches, &recycler, &recycler_out, false); @@ -986,14 +988,14 @@ mod tests { tx.signatures.pop(); let packet = sigverify::make_packet_from_transaction(tx); - let mut batches = generate_packet_vec(&packet, 1, 1); + let mut batches = generate_packet_batches(&packet, 1, 1); // verify packets ed25519_verify(&mut batches); assert!(batches .iter() - .flat_map(|p| &p.packets) - .all(|p| p.meta.discard)); + .flat_map(|batch| &batch.packets) + .all(|p| p.meta.discard())); } #[test] @@ -1020,7 +1022,7 @@ mod tests { let n = 4; let num_batches = 3; - let mut batches = generate_packet_vec(&packet, n, num_batches); + let mut batches = generate_packet_batches(&packet, n, num_batches); packet.data[40] = packet.data[40].wrapping_add(8); @@ -1035,13 +1037,13 @@ mod tests { ref_vec[0].push(0u8); assert!(batches .iter() - .flat_map(|p| &p.packets) + .flat_map(|batch| &batch.packets) .zip(ref_vec.into_iter().flatten()) .all(|(p, discard)| { if discard == 0 { - p.meta.discard + p.meta.discard() } else { - !p.meta.discard + !p.meta.discard() } })); } 
@@ -1059,7 +1061,7 @@ mod tests { for _ in 0..50 { let n = thread_rng().gen_range(1, 30); let num_batches = thread_rng().gen_range(2, 30); - let mut batches = generate_packet_vec(&packet, n, num_batches); + let mut batches = generate_packet_batches(&packet, n, num_batches); let num_modifications = thread_rng().gen_range(0, 5); for _ in 0..num_modifications { @@ -1080,8 +1082,8 @@ mod tests { // check result batches .iter() - .flat_map(|p| &p.packets) - .zip(batches_cpu.iter().flat_map(|p| &p.packets)) + .flat_map(|batch| &batch.packets) + .zip(batches_cpu.iter().flat_map(|batch| &batch.packets)) .for_each(|(p1, p2)| assert_eq!(p1, p2)); } } @@ -1185,6 +1187,7 @@ mod tests { #[test] fn test_is_simple_vote_transaction() { solana_logger::setup(); + let mut rng = rand::thread_rng(); // tansfer tx is not { @@ -1193,17 +1196,17 @@ mod tests { let mut packet = sigverify::make_packet_from_transaction(tx); let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap(); check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok(); - assert!(!packet.meta.is_simple_vote_tx); + assert!(!packet.meta.is_simple_vote_tx()); } // single vote tx is { - let mut tx = vote_tx(); + let mut tx = new_test_vote_tx(&mut rng); tx.message.instructions[0].data = vec![1, 2, 3]; let mut packet = sigverify::make_packet_from_transaction(tx); let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap(); check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok(); - assert!(packet.meta.is_simple_vote_tx); + assert!(packet.meta.is_simple_vote_tx()); } // multiple mixed tx is not @@ -1224,22 +1227,24 @@ mod tests { let mut packet = sigverify::make_packet_from_transaction(tx); let packet_offsets = do_get_packet_offsets(&packet, 0).unwrap(); check_for_simple_vote_transaction(&mut packet, &packet_offsets, 0).ok(); - assert!(!packet.meta.is_simple_vote_tx); + assert!(!packet.meta.is_simple_vote_tx()); } } #[test] fn test_is_simple_vote_transaction_with_offsets() { 
solana_logger::setup(); + let mut rng = rand::thread_rng(); let mut current_offset = 0usize; - let mut batch = Packets::default(); + let mut batch = PacketBatch::default(); batch .packets .push(sigverify::make_packet_from_transaction(test_tx())); + let tx = new_test_vote_tx(&mut rng); batch .packets - .push(sigverify::make_packet_from_transaction(vote_tx())); + .push(sigverify::make_packet_from_transaction(tx)); batch .packets .iter_mut() @@ -1248,9 +1253,9 @@ mod tests { let packet_offsets = do_get_packet_offsets(packet, current_offset).unwrap(); check_for_simple_vote_transaction(packet, &packet_offsets, current_offset).ok(); if index == 1 { - assert!(packet.meta.is_simple_vote_tx); + assert!(packet.meta.is_simple_vote_tx()); } else { - assert!(!packet.meta.is_simple_vote_tx); + assert!(!packet.meta.is_simple_vote_tx()); } current_offset = current_offset.saturating_add(size_of::()); diff --git a/perf/src/test_tx.rs b/perf/src/test_tx.rs index 14ce3ea0f74e32..4e896b547b96af 100644 --- a/perf/src/test_tx.rs +++ b/perf/src/test_tx.rs @@ -1,5 +1,7 @@ use { + rand::{CryptoRng, Rng, RngCore}, solana_sdk::{ + clock::Slot, hash::Hash, instruction::CompiledInstruction, signature::{Keypair, Signer}, @@ -50,15 +52,21 @@ pub fn test_multisig_tx() -> Transaction { ) } -pub fn vote_tx() -> Transaction { - let keypair = Keypair::new(); +pub fn new_test_vote_tx(rng: &mut R) -> Transaction +where + R: CryptoRng + RngCore, +{ + let mut slots: Vec = std::iter::repeat_with(|| rng.gen()).take(5).collect(); + slots.sort_unstable(); + slots.dedup(); + let switch_proof_hash = rng.gen_bool(0.5).then(|| solana_sdk::hash::new_rand(rng)); vote_transaction::new_vote_transaction( - vec![2], - Hash::default(), - Hash::default(), - &keypair, - &keypair, - &keypair, - None, + slots, + solana_sdk::hash::new_rand(rng), // bank_hash + solana_sdk::hash::new_rand(rng), // blockhash + &Keypair::generate(rng), // node_keypair + &Keypair::generate(rng), // vote_keypair + &Keypair::generate(rng), // 
authorized_voter_keypair + switch_proof_hash, ) } diff --git a/poh-bench/Cargo.toml b/poh-bench/Cargo.toml index d11e760b598479..2940c1ba242822 100644 --- a/poh-bench/Cargo.toml +++ b/poh-bench/Cargo.toml @@ -2,23 +2,24 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-poh-bench" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-poh-bench" +publish = false [dependencies] clap = "2.33.1" log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/poh/Cargo.toml b/poh/Cargo.toml index a3fd44a71e13d3..770550281f1da5 100644 --- a/poh/Cargo.toml +++ b/poh/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-poh" -version = "1.9.0" +version = "1.9.4" description = "Solana PoH" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,21 +13,21 @@ edition = "2021" core_affinity = "0.5.10" crossbeam-channel = "0.5" log = "0.4.14" -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path 
= "../metrics", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-sys-tuner = { path = "../sys-tuner", version = "=1.9.0" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-sys-tuner = { path = "../sys-tuner", version = "=1.9.4" } thiserror = "1.0" [dev-dependencies] bincode = "1.3.3" matches = "0.1.9" rand = "0.7.0" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/program-runtime/Cargo.toml b/program-runtime/Cargo.toml index af5f36703938e9..f35f8fb1e3d514 100644 --- a/program-runtime/Cargo.toml +++ b/program-runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-program-runtime" -version = "1.9.0" +version = "1.9.4" description = "Solana program runtime" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,8 +19,8 @@ log = "0.4.14" num-derive = { version = "0.3" } num-traits = { version = "0.2" } serde = { version = "1.0.129", features = ["derive", "rc"] } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } thiserror = "1.0" [lib] diff --git a/program-runtime/src/invoke_context.rs b/program-runtime/src/invoke_context.rs index 3d2ec0d5e936db..83641068e3d8cc 100644 --- a/program-runtime/src/invoke_context.rs +++ 
b/program-runtime/src/invoke_context.rs @@ -10,9 +10,8 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, compute_budget::ComputeBudget, feature_set::{ - demote_program_write_locks, do_support_realloc, neon_evm_compute_budget, - reject_empty_instruction_without_program, remove_native_loader, requestable_heap_size, - tx_wide_compute_cap, FeatureSet, + do_support_realloc, neon_evm_compute_budget, reject_empty_instruction_without_program, + remove_native_loader, requestable_heap_size, tx_wide_compute_cap, FeatureSet, }, hash::Hash, instruction::{AccountMeta, CompiledInstruction, Instruction, InstructionError}, @@ -25,9 +24,18 @@ use { std::{cell::RefCell, collections::HashMap, fmt::Debug, rc::Rc, sync::Arc}, }; +pub type TransactionAccountRefCell = (Pubkey, Rc>); +pub type TransactionAccountRefCells = Vec; + pub type ProcessInstructionWithContext = fn(usize, &[u8], &mut InvokeContext) -> Result<(), InstructionError>; +#[derive(Debug, PartialEq)] +pub struct ProcessInstructionResult { + pub compute_units_consumed: u64, + pub result: Result<(), InstructionError>, +} + #[derive(Clone)] pub struct BuiltinProgram { pub program_id: Pubkey, @@ -138,7 +146,7 @@ pub struct InvokeContext<'a> { invoke_stack: Vec>, rent: Rent, pre_accounts: Vec, - accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [TransactionAccountRefCell], builtin_programs: &'a [BuiltinProgram], pub sysvars: &'a [(Pubkey, Vec)], log_collector: Option>>, @@ -158,7 +166,7 @@ impl<'a> InvokeContext<'a> { #[allow(clippy::too_many_arguments)] pub fn new( rent: Rent, - accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [TransactionAccountRefCell], builtin_programs: &'a [BuiltinProgram], sysvars: &'a [(Pubkey, Vec)], log_collector: Option>>, @@ -190,7 +198,7 @@ impl<'a> InvokeContext<'a> { } pub fn new_mock( - accounts: &'a [(Pubkey, Rc>)], + accounts: &'a [TransactionAccountRefCell], builtin_programs: &'a [BuiltinProgram], ) -> Self { Self::new( @@ -213,7 +221,7 @@ impl<'a> InvokeContext<'a> { message: 
&Message, instruction: &CompiledInstruction, program_indices: &[usize], - account_indices: Option<&[usize]>, + account_indices: &[usize], ) -> Result<(), InstructionError> { if self.invoke_stack.len() > self.compute_budget.max_invoke_depth { return Err(InstructionError::CallDepth); @@ -279,9 +287,6 @@ impl<'a> InvokeContext<'a> { } // Create the KeyedAccounts that will be passed to the program - let demote_program_write_locks = self - .feature_set - .is_active(&demote_program_write_locks::id()); let keyed_accounts = program_indices .iter() .map(|account_index| { @@ -294,14 +299,14 @@ impl<'a> InvokeContext<'a> { }) .chain(instruction.accounts.iter().map(|index_in_instruction| { let index_in_instruction = *index_in_instruction as usize; - let account_index = if let Some(account_indices) = account_indices { - account_indices[index_in_instruction] - } else { + let account_index = if account_indices.is_empty() { index_in_instruction + } else { + account_indices[index_in_instruction] }; ( message.is_signer(index_in_instruction), - message.is_writable(index_in_instruction, demote_program_write_locks), + message.is_writable(index_in_instruction), &self.accounts[account_index].0, &self.accounts[account_index].1 as &RefCell, ) @@ -326,16 +331,13 @@ impl<'a> InvokeContext<'a> { } /// Verify the results of an instruction - pub fn verify( + fn verify( &mut self, message: &Message, instruction: &CompiledInstruction, program_indices: &[usize], ) -> Result<(), InstructionError> { let program_id = instruction.program_id(&message.account_keys); - let demote_program_write_locks = self - .feature_set - .is_active(&demote_program_write_locks::id()); let do_support_realloc = self.feature_set.is_active(&do_support_realloc::id()); // Verify all executable accounts have zero outstanding refs @@ -361,7 +363,7 @@ impl<'a> InvokeContext<'a> { pre_account .verify( program_id, - message.is_writable(account_index, demote_program_write_locks), + message.is_writable(account_index), &self.rent, 
&account, &mut self.timings, @@ -395,7 +397,7 @@ impl<'a> InvokeContext<'a> { } /// Verify and update PreAccount state based on program execution - pub fn verify_and_update( + fn verify_and_update( &mut self, instruction: &CompiledInstruction, account_indices: &[usize], @@ -497,12 +499,14 @@ impl<'a> InvokeContext<'a> { if let Some(instruction_recorder) = &self.instruction_recorder { instruction_recorder.record_instruction(instruction); } - self.process_cross_program_instruction( + self.process_instruction( &message, + &message.instructions[0], &program_indices, &account_indices, &caller_write_privileges, - )?; + ) + .result?; // Verify the called program has not misbehaved let do_support_realloc = self.feature_set.is_active(&do_support_realloc::id()); @@ -522,7 +526,7 @@ impl<'a> InvokeContext<'a> { Ok(()) } - /// Helper to prepare for process_cross_program_instruction() + /// Helper to prepare for process_instruction() pub fn create_message( &mut self, instruction: &Instruction, @@ -635,43 +639,63 @@ impl<'a> InvokeContext<'a> { Ok((message, caller_write_privileges, program_indices)) } - /// Process a cross-program instruction - pub fn process_cross_program_instruction( + /// Processes a cross-program instruction and returns how many compute units were used + pub fn process_instruction( &mut self, message: &Message, + instruction: &CompiledInstruction, program_indices: &[usize], account_indices: &[usize], caller_write_privileges: &[bool], - ) -> Result<(), InstructionError> { - // This function is always called with a valid instruction, if that changes return an error - let instruction = message - .instructions - .get(0) - .ok_or(InstructionError::GenericError)?; - - // Verify the calling program hasn't misbehaved - self.verify_and_update(instruction, account_indices, caller_write_privileges)?; - - self.return_data = (*self.get_caller()?, Vec::new()); - self.push(message, instruction, program_indices, Some(account_indices))?; - let result = 
self.process_instruction(&instruction.data).and_then(|_| { - // Verify the called program has not misbehaved - let demote_program_write_locks = self - .feature_set - .is_active(&demote_program_write_locks::id()); - let write_privileges: Vec = (0..message.account_keys.len()) - .map(|i| message.is_writable(i, demote_program_write_locks)) - .collect(); - self.verify_and_update(instruction, account_indices, &write_privileges) - }); + ) -> ProcessInstructionResult { + let is_lowest_invocation_level = self.invoke_stack.is_empty(); + if !is_lowest_invocation_level { + // Verify the calling program hasn't misbehaved + let result = + self.verify_and_update(instruction, account_indices, caller_write_privileges); + if result.is_err() { + return ProcessInstructionResult { + compute_units_consumed: 0, + result, + }; + } + } - // Restore previous state + let mut compute_units_consumed = 0; + let result = self + .push(message, instruction, program_indices, account_indices) + .and_then(|_| { + self.return_data = (*instruction.program_id(&message.account_keys), Vec::new()); + let pre_remaining_units = self.compute_meter.borrow().get_remaining(); + let execution_result = self.process_executable_chain(&instruction.data); + let post_remaining_units = self.compute_meter.borrow().get_remaining(); + compute_units_consumed = pre_remaining_units.saturating_sub(post_remaining_units); + execution_result?; + + // Verify the called program has not misbehaved + if is_lowest_invocation_level { + self.verify(message, instruction, program_indices) + } else { + let write_privileges: Vec = (0..message.account_keys.len()) + .map(|i| message.is_writable(i)) + .collect(); + self.verify_and_update(instruction, account_indices, &write_privileges) + } + }); + + // Pop the invoke_stack to restore previous state self.pop(); - result + ProcessInstructionResult { + compute_units_consumed, + result, + } } /// Calls the instruction's program entrypoint method - pub fn process_instruction(&mut self, 
instruction_data: &[u8]) -> Result<(), InstructionError> { + fn process_executable_chain( + &mut self, + instruction_data: &[u8], + ) -> Result<(), InstructionError> { let keyed_accounts = self.get_keyed_accounts()?; let root_account = keyed_account_at_index(keyed_accounts, 0) .map_err(|_| InstructionError::UnsupportedProgramId)?; @@ -718,9 +742,15 @@ impl<'a> InvokeContext<'a> { /// Get the owner of the currently executing program pub fn get_loader(&self) -> Result { - self.get_instruction_keyed_accounts() - .and_then(|keyed_accounts| keyed_accounts.first().ok_or(InstructionError::CallDepth)) - .and_then(|keyed_account| keyed_account.owner()) + let frame = self + .invoke_stack + .last() + .ok_or(InstructionError::CallDepth)?; + let first_instruction_account = frame + .number_of_program_accounts + .checked_sub(1) + .ok_or(InstructionError::CallDepth)?; + frame.keyed_accounts[first_instruction_account].owner() } /// Removes the first keyed account @@ -748,17 +778,15 @@ impl<'a> InvokeContext<'a> { .ok_or(InstructionError::CallDepth) } - /// Get the list of keyed accounts skipping `first_instruction_account` many entries + /// Get the list of keyed accounts without the chain of program accounts + /// + /// Note: This only contains the `KeyedAccount`s passed by the caller. 
pub fn get_instruction_keyed_accounts(&self) -> Result<&[KeyedAccount], InstructionError> { let frame = self .invoke_stack .last() .ok_or(InstructionError::CallDepth)?; - let first_instruction_account = frame - .number_of_program_accounts - .checked_sub(1) - .ok_or(InstructionError::CallDepth)?; - Ok(&frame.keyed_accounts[first_instruction_account..]) + Ok(&frame.keyed_accounts[frame.number_of_program_accounts..]) } /// Get this invocation's LogCollector @@ -816,7 +844,7 @@ impl<'a> InvokeContext<'a> { } pub struct MockInvokeContextPreparation { - pub accounts: Vec<(Pubkey, Rc>)>, + pub accounts: TransactionAccountRefCells, pub message: Message, pub account_indices: Vec, } @@ -827,10 +855,7 @@ pub fn prepare_mock_invoke_context( keyed_accounts: &[(bool, bool, Pubkey, Rc>)], ) -> MockInvokeContextPreparation { #[allow(clippy::type_complexity)] - let (accounts, mut metas): ( - Vec<(Pubkey, Rc>)>, - Vec, - ) = keyed_accounts + let (accounts, mut metas): (TransactionAccountRefCells, Vec) = keyed_accounts .iter() .map(|(is_signer, is_writable, pubkey, account)| { ( @@ -909,17 +934,18 @@ pub fn with_mock_invoke_context R>( &preparation.message, &preparation.message.instructions[0], &program_indices, - Some(&preparation.account_indices), + &preparation.account_indices, ) .unwrap(); callback(&mut invoke_context) } -pub fn mock_process_instruction( +pub fn mock_process_instruction_with_sysvars( loader_id: &Pubkey, mut program_indices: Vec, instruction_data: &[u8], keyed_accounts: &[(bool, bool, Pubkey, Rc>)], + sysvars: &[(Pubkey, Vec)], process_instruction: ProcessInstructionWithContext, ) -> Result<(), InstructionError> { let mut preparation = @@ -928,15 +954,33 @@ pub fn mock_process_instruction( program_indices.insert(0, preparation.accounts.len()); preparation.accounts.push((*loader_id, processor_account)); let mut invoke_context = InvokeContext::new_mock(&preparation.accounts, &[]); + invoke_context.sysvars = sysvars; invoke_context.push( &preparation.message, 
&preparation.message.instructions[0], &program_indices, - Some(&preparation.account_indices), + &preparation.account_indices, )?; process_instruction(1, instruction_data, &mut invoke_context) } +pub fn mock_process_instruction( + loader_id: &Pubkey, + program_indices: Vec, + instruction_data: &[u8], + keyed_accounts: &[(bool, bool, Pubkey, Rc>)], + process_instruction: ProcessInstructionWithContext, +) -> Result<(), InstructionError> { + mock_process_instruction_with_sysvars( + loader_id, + program_indices, + instruction_data, + keyed_accounts, + &[], + process_instruction, + ) +} + #[cfg(test)] mod tests { use { @@ -957,6 +1001,10 @@ mod tests { ModifyOwned, ModifyNotOwned, ModifyReadonly, + ConsumeComputeUnits { + compute_units_consumed: u64, + desired_result: Result<(), InstructionError>, + }, } #[test] @@ -1026,6 +1074,17 @@ mod tests { .try_account_ref_mut()? .data_as_mut_slice()[0] = 1 } + MockInstruction::ConsumeComputeUnits { + compute_units_consumed, + desired_result, + } => { + invoke_context + .get_compute_meter() + .borrow_mut() + .consume(compute_units_consumed) + .unwrap(); + return desired_result; + } } } else { return Err(InstructionError::InvalidInstructionData); @@ -1078,7 +1137,7 @@ mod tests { &message, &message.instructions[0], &[MAX_DEPTH + depth_reached], - None, + &[], ) { break; @@ -1104,7 +1163,7 @@ mod tests { None, ); let write_privileges: Vec = (0..message.account_keys.len()) - .map(|i| message.is_writable(i, /*demote_program_write_locks=*/ true)) + .map(|i| message.is_writable(i)) .collect(); // modify account owned by the program @@ -1157,7 +1216,7 @@ mod tests { ); let mut invoke_context = InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); assert!(invoke_context .verify(&message, &message.instructions[0], &[0]) @@ -1222,27 +1281,27 @@ mod tests { }]; let mut invoke_context = InvokeContext::new_mock(&accounts, 
builtin_programs); invoke_context - .push(&message, &caller_instruction, &program_indices[..1], None) + .push(&message, &caller_instruction, &program_indices[..1], &[]) .unwrap(); // not owned account modified by the caller (before the invoke) - let demote_program_write_locks = invoke_context - .feature_set - .is_active(&demote_program_write_locks::id()); let caller_write_privileges = message .account_keys .iter() .enumerate() - .map(|(i, _)| message.is_writable(i, demote_program_write_locks)) + .map(|(i, _)| message.is_writable(i)) .collect::>(); accounts[0].1.borrow_mut().data_as_mut_slice()[0] = 1; assert_eq!( - invoke_context.process_cross_program_instruction( - &message, - &program_indices[1..], - &account_indices, - &caller_write_privileges, - ), + invoke_context + .process_instruction( + &message, + &message.instructions[0], + &program_indices[1..], + &account_indices, + &caller_write_privileges, + ) + .result, Err(InstructionError::ExternalAccountDataModified) ); accounts[0].1.borrow_mut().data_as_mut_slice()[0] = 0; @@ -1250,12 +1309,15 @@ mod tests { // readonly account modified by the invoker accounts[2].1.borrow_mut().data_as_mut_slice()[0] = 1; assert_eq!( - invoke_context.process_cross_program_instruction( - &message, - &program_indices[1..], - &account_indices, - &caller_write_privileges, - ), + invoke_context + .process_instruction( + &message, + &message.instructions[0], + &program_indices[1..], + &account_indices, + &caller_write_privileges, + ) + .result, Err(InstructionError::ReadonlyDataModified) ); accounts[2].1.borrow_mut().data_as_mut_slice()[0] = 0; @@ -1263,15 +1325,33 @@ mod tests { invoke_context.pop(); let cases = vec![ - (MockInstruction::NoopSuccess, Ok(())), + ( + MockInstruction::NoopSuccess, + ProcessInstructionResult { + result: Ok(()), + compute_units_consumed: 0, + }, + ), ( MockInstruction::NoopFail, - Err(InstructionError::GenericError), + ProcessInstructionResult { + result: Err(InstructionError::GenericError), + 
compute_units_consumed: 0, + }, + ), + ( + MockInstruction::ModifyOwned, + ProcessInstructionResult { + result: Ok(()), + compute_units_consumed: 0, + }, ), - (MockInstruction::ModifyOwned, Ok(())), ( MockInstruction::ModifyNotOwned, - Err(InstructionError::ExternalAccountDataModified), + ProcessInstructionResult { + result: Err(InstructionError::ExternalAccountDataModified), + compute_units_consumed: 0, + }, ), ]; for case in cases { @@ -1279,17 +1359,18 @@ mod tests { Instruction::new_with_bincode(callee_program_id, &case.0, metas.clone()); let message = Message::new(&[callee_instruction], None); invoke_context - .push(&message, &caller_instruction, &program_indices[..1], None) + .push(&message, &caller_instruction, &program_indices[..1], &[]) .unwrap(); let caller_write_privileges = message .account_keys .iter() .enumerate() - .map(|(i, _)| message.is_writable(i, demote_program_write_locks)) + .map(|(i, _)| message.is_writable(i)) .collect::>(); assert_eq!( - invoke_context.process_cross_program_instruction( + invoke_context.process_instruction( &message, + &message.instructions[0], &program_indices[1..], &account_indices, &caller_write_privileges, @@ -1350,7 +1431,7 @@ mod tests { }]; let mut invoke_context = InvokeContext::new_mock(&accounts, builtin_programs); invoke_context - .push(&message, &caller_instruction, &program_indices, None) + .push(&message, &caller_instruction, &program_indices, &[]) .unwrap(); // not owned account modified by the invoker @@ -1393,7 +1474,7 @@ mod tests { Instruction::new_with_bincode(callee_program_id, &case.0, metas.clone()); let message = Message::new(&[callee_instruction.clone()], None); invoke_context - .push(&message, &caller_instruction, &program_indices, None) + .push(&message, &caller_instruction, &program_indices, &[]) .unwrap(); assert_eq!( invoke_context.native_invoke(callee_instruction, &[]), @@ -1440,7 +1521,7 @@ mod tests { invoke_context.feature_set = Arc::new(feature_set); invoke_context - .push(&noop_message, 
&noop_message.instructions[0], &[0], None) + .push(&noop_message, &noop_message.instructions[0], &[0], &[]) .unwrap(); assert_eq!( *invoke_context.get_compute_budget(), @@ -1449,7 +1530,7 @@ mod tests { invoke_context.pop(); invoke_context - .push(&neon_message, &neon_message.instructions[0], &[1], None) + .push(&neon_message, &neon_message.instructions[0], &[1], &[]) .unwrap(); let expected_compute_budget = ComputeBudget { max_units: 500_000, @@ -1463,7 +1544,7 @@ mod tests { invoke_context.pop(); invoke_context - .push(&noop_message, &noop_message.instructions[0], &[0], None) + .push(&noop_message, &noop_message.instructions[0], &[0], &[]) .unwrap(); assert_eq!( *invoke_context.get_compute_budget(), @@ -1471,4 +1552,92 @@ mod tests { ); invoke_context.pop(); } + + #[test] + fn test_process_instruction_compute_budget() { + let caller_program_id = solana_sdk::pubkey::new_rand(); + let callee_program_id = solana_sdk::pubkey::new_rand(); + let owned_account = AccountSharedData::new(42, 1, &callee_program_id); + let not_owned_account = AccountSharedData::new(84, 1, &solana_sdk::pubkey::new_rand()); + let readonly_account = AccountSharedData::new(168, 1, &solana_sdk::pubkey::new_rand()); + let loader_account = AccountSharedData::new(0, 0, &native_loader::id()); + let mut program_account = AccountSharedData::new(1, 0, &native_loader::id()); + program_account.set_executable(true); + + let accounts = vec![ + ( + solana_sdk::pubkey::new_rand(), + Rc::new(RefCell::new(owned_account)), + ), + ( + solana_sdk::pubkey::new_rand(), + Rc::new(RefCell::new(not_owned_account)), + ), + ( + solana_sdk::pubkey::new_rand(), + Rc::new(RefCell::new(readonly_account)), + ), + (caller_program_id, Rc::new(RefCell::new(loader_account))), + (callee_program_id, Rc::new(RefCell::new(program_account))), + ]; + let account_indices = [0, 1, 2]; + let program_indices = [3, 4]; + + let metas = vec![ + AccountMeta::new(accounts[0].0, false), + AccountMeta::new(accounts[1].0, false), + 
AccountMeta::new_readonly(accounts[2].0, false), + ]; + + let builtin_programs = &[BuiltinProgram { + program_id: callee_program_id, + process_instruction: mock_process_instruction, + }]; + let mut invoke_context = InvokeContext::new_mock(&accounts, builtin_programs); + + let compute_units_consumed = 10; + let desired_results = vec![Ok(()), Err(InstructionError::GenericError)]; + + for desired_result in desired_results { + let caller_instruction = + CompiledInstruction::new(program_indices[0] as u8, &(), vec![0, 1, 2, 3, 4]); + let callee_instruction = Instruction::new_with_bincode( + callee_program_id, + &MockInstruction::ConsumeComputeUnits { + compute_units_consumed, + desired_result: desired_result.clone(), + }, + metas.clone(), + ); + let message = Message::new(&[callee_instruction.clone()], None); + invoke_context + .push(&message, &caller_instruction, &program_indices[..1], &[]) + .unwrap(); + let caller_write_privileges = message + .account_keys + .iter() + .enumerate() + .map(|(i, _)| message.is_writable(i)) + .collect::>(); + let result = invoke_context.process_instruction( + &message, + &message.instructions[0], + &program_indices[1..], + &account_indices, + &caller_write_privileges, + ); + + // Because the instruction had compute cost > 0, then regardless of the execution result, + // the number of compute units consumed should be a non-default which is something greater + // than zero. + assert!(result.compute_units_consumed > 0); + assert_eq!( + result, + ProcessInstructionResult { + compute_units_consumed, + result: desired_result, + } + ); + } + } } diff --git a/program-runtime/src/log_collector.rs b/program-runtime/src/log_collector.rs index 0618291db2140b..698811fde4a15d 100644 --- a/program-runtime/src/log_collector.rs +++ b/program-runtime/src/log_collector.rs @@ -43,7 +43,11 @@ impl From for Vec { #[macro_export] macro_rules! 
ic_logger_msg { ($log_collector:expr, $message:expr) => { - $crate::log_collector::log::debug!("{}", $message); + $crate::log_collector::log::debug!( + target: "solana_runtime::message_processor::stable_log", + "{}", + $message + ); if let Some(log_collector) = $log_collector.as_ref() { if let Ok(mut log_collector) = log_collector.try_borrow_mut() { log_collector.log($message); @@ -51,7 +55,11 @@ macro_rules! ic_logger_msg { } }; ($log_collector:expr, $fmt:expr, $($arg:tt)*) => { - $crate::log_collector::log::debug!($fmt, $($arg)*); + $crate::log_collector::log::debug!( + target: "solana_runtime::message_processor::stable_log", + $fmt, + $($arg)* + ); if let Some(log_collector) = $log_collector.as_ref() { if let Ok(mut log_collector) = log_collector.try_borrow_mut() { log_collector.log(&format!($fmt, $($arg)*)); diff --git a/program-runtime/src/timings.rs b/program-runtime/src/timings.rs index a61b621e1a7af4..ec47835dae3213 100644 --- a/program-runtime/src/timings.rs +++ b/program-runtime/src/timings.rs @@ -1,13 +1,41 @@ use {solana_sdk::pubkey::Pubkey, std::collections::HashMap}; -#[derive(Default, Debug)] +#[derive(Default, Debug, PartialEq)] pub struct ProgramTiming { pub accumulated_us: u64, pub accumulated_units: u64, pub count: u32, + pub errored_txs_compute_consumed: Vec, + // Sum of all units in `errored_txs_compute_consumed` + pub total_errored_units: u64, } -#[derive(Default, Debug)] +impl ProgramTiming { + pub fn coalesce_error_timings(&mut self, current_estimated_program_cost: u64) { + for tx_error_compute_consumed in self.errored_txs_compute_consumed.drain(..) 
{ + let compute_units_update = + std::cmp::max(current_estimated_program_cost, tx_error_compute_consumed); + self.accumulated_units = self.accumulated_units.saturating_add(compute_units_update); + self.count = self.count.saturating_add(1); + } + } + + pub fn accumulate_program_timings(&mut self, other: &ProgramTiming) { + self.accumulated_us = self.accumulated_us.saturating_add(other.accumulated_us); + self.accumulated_units = self + .accumulated_units + .saturating_add(other.accumulated_units); + self.count = self.count.saturating_add(other.count); + // Clones the entire vector, maybe not great... + self.errored_txs_compute_consumed + .extend(other.errored_txs_compute_consumed.clone()); + self.total_errored_units = self + .total_errored_units + .saturating_add(other.total_errored_units); + } +} + +#[derive(Default, Debug, PartialEq)] pub struct ExecuteDetailsTimings { pub serialize_us: u64, pub create_vm_us: u64, @@ -37,19 +65,117 @@ impl ExecuteDetailsTimings { .saturating_add(other.data_size_changed); for (id, other) in &other.per_program_timings { let program_timing = self.per_program_timings.entry(*id).or_default(); - program_timing.accumulated_us = program_timing - .accumulated_us - .saturating_add(other.accumulated_us); - program_timing.accumulated_units = program_timing - .accumulated_units - .saturating_add(other.accumulated_units); - program_timing.count = program_timing.count.saturating_add(other.count); + program_timing.accumulate_program_timings(other); } } - pub fn accumulate_program(&mut self, program_id: &Pubkey, us: u64, units: u64) { + + pub fn accumulate_program( + &mut self, + program_id: &Pubkey, + us: u64, + compute_units_consumed: u64, + is_error: bool, + ) { let program_timing = self.per_program_timings.entry(*program_id).or_default(); program_timing.accumulated_us = program_timing.accumulated_us.saturating_add(us); - program_timing.accumulated_units = program_timing.accumulated_units.saturating_add(units); - program_timing.count = 
program_timing.count.saturating_add(1); + if is_error { + program_timing + .errored_txs_compute_consumed + .push(compute_units_consumed); + program_timing.total_errored_units = program_timing + .total_errored_units + .saturating_add(compute_units_consumed); + } else { + program_timing.accumulated_units = program_timing + .accumulated_units + .saturating_add(compute_units_consumed); + program_timing.count = program_timing.count.saturating_add(1); + }; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn construct_execute_timings_with_program( + program_id: &Pubkey, + us: u64, + compute_units_consumed: u64, + ) -> ExecuteDetailsTimings { + let mut execute_details_timings = ExecuteDetailsTimings::default(); + + // Accumulate an erroring transaction + let is_error = true; + execute_details_timings.accumulate_program( + program_id, + us, + compute_units_consumed, + is_error, + ); + + // Accumulate a non-erroring transaction + let is_error = false; + execute_details_timings.accumulate_program( + program_id, + us, + compute_units_consumed, + is_error, + ); + + let program_timings = execute_details_timings + .per_program_timings + .get(program_id) + .unwrap(); + + // Both error and success transactions count towards `accumulated_us` + assert_eq!(program_timings.accumulated_us, us.saturating_mul(2)); + assert_eq!(program_timings.accumulated_units, compute_units_consumed); + assert_eq!(program_timings.count, 1,); + assert_eq!( + program_timings.errored_txs_compute_consumed, + vec![compute_units_consumed] + ); + assert_eq!(program_timings.total_errored_units, compute_units_consumed,); + + execute_details_timings + } + + #[test] + fn test_execute_details_timing_acumulate_program() { + // Acumulate an erroring transaction + let program_id = Pubkey::new_unique(); + let us = 100; + let compute_units_consumed = 1; + construct_execute_timings_with_program(&program_id, us, compute_units_consumed); + } + + #[test] + fn test_execute_details_timing_acumulate() { + // Acumulate an 
erroring transaction + let program_id = Pubkey::new_unique(); + let us = 100; + let compute_units_consumed = 1; + let mut execute_details_timings = ExecuteDetailsTimings::default(); + + // Construct another separate instance of ExecuteDetailsTimings with non default fields + let mut other_execute_details_timings = + construct_execute_timings_with_program(&program_id, us, compute_units_consumed); + let account_count = 1; + let data_size_changed = 1; + other_execute_details_timings.serialize_us = us; + other_execute_details_timings.create_vm_us = us; + other_execute_details_timings.execute_us = us; + other_execute_details_timings.deserialize_us = us; + other_execute_details_timings.changed_account_count = account_count; + other_execute_details_timings.total_account_count = account_count; + other_execute_details_timings.total_data_size = data_size_changed; + other_execute_details_timings.data_size_changed = data_size_changed; + + // Accumulate the other instance into the current instance + execute_details_timings.accumulate(&other_execute_details_timings); + + // Check that the two instances are equal + assert_eq!(execute_details_timings, other_execute_details_timings); } } diff --git a/program-test/Cargo.toml b/program-test/Cargo.toml index 19e667332dabb4..91964ff9118bd5 100644 --- a/program-test/Cargo.toml +++ b/program-test/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" license = "Apache-2.0" name = "solana-program-test" repository = "https://github.com/solana-labs/solana" -version = "1.9.0" +version = "1.9.4" [dependencies] async-trait = "0.1.51" @@ -14,14 +14,14 @@ bincode = "1.3.3" chrono-humanize = "0.2.1" log = "0.4.14" serde = "1.0.130" -solana-banks-client = { path = "../banks-client", version = "=1.9.0" } -solana-banks-server = { path = "../banks-server", version = "=1.9.0" } -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-program-runtime = { path = 
"../program-runtime", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-banks-client = { path = "../banks-client", version = "=1.9.4" } +solana-banks-server = { path = "../banks-server", version = "=1.9.4" } +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } thiserror = "1.0" tokio = { version = "1", features = ["full"] } diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 77c4ffdefd241d..16338e20efe728 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -26,7 +26,6 @@ use { compute_budget::ComputeBudget, entrypoint::{ProgramResult, SUCCESS}, epoch_schedule::EpochSchedule, - feature_set::demote_program_write_locks, fee_calculator::{FeeCalculator, FeeRateGovernor}, genesis_config::{ClusterType, GenesisConfig}, hash::Hash, @@ -41,7 +40,7 @@ use { sysvar::{ clock, epoch_schedule, fees::{self}, - rent, Sysvar, + rent, Sysvar, SysvarId, }, }, solana_vote_program::vote_state::{VoteState, VoteStateVersions}, @@ -244,15 +243,12 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { let message = Message::new(&[instruction.clone()], None); let program_id_index = message.instructions[0].program_id_index as usize; let program_id = message.account_keys[program_id_index]; - let demote_program_write_locks = invoke_context - .feature_set - .is_active(&demote_program_write_locks::id()); // TODO don't have the caller's keyed_accounts so can't validate writer or signer escalation or deescalation yet let 
caller_privileges = message .account_keys .iter() .enumerate() - .map(|(i, _)| message.is_writable(i, demote_program_write_locks)) + .map(|(i, _)| message.is_writable(i)) .collect::>(); stable_log::program_invoke(&log_collector, &program_id, invoke_context.invoke_depth()); @@ -278,7 +274,7 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { account.set_executable(account_info.executable); account.set_rent_epoch(account_info.rent_epoch); } - let account_info = if message.is_writable(i, demote_program_write_locks) { + let account_info = if message.is_writable(i) { Some(account_info) } else { None @@ -319,12 +315,14 @@ impl solana_sdk::program_stubs::SyscallStubs for SyscallStubs { } invoke_context - .process_cross_program_instruction( + .process_instruction( &message, + &message.instructions[0], &program_indices, &account_indices, &caller_privileges, ) + .result .map_err(|err| ProgramError::try_from(err).unwrap_or_else(|err| panic!("{}", err)))?; // Copy writeable account modifications back into the caller's AccountInfos @@ -418,7 +416,7 @@ fn setup_fees(bank: Bank) -> Bank { bank.commit_transactions( &[], // transactions &mut [], // loaded accounts - &[], // transaction execution results + vec![], // transaction execution results 0, // tx count 1, // signature count &mut ExecuteTimings::default(), @@ -813,6 +811,7 @@ impl ProgramTest { genesis_config, mint_keypair, voting_keypair, + validator_pubkey: bootstrap_validator_pubkey, }, ) } @@ -1044,6 +1043,18 @@ impl ProgramTestContext { bank.store_account(address, account); } + /// Create or overwrite a sysvar, subverting normal runtime checks. + /// + /// This method exists to make it easier to set up artificial situations + /// that would be difficult to replicate on a new test cluster. Beware + /// that it can be used to create states that would not be reachable + /// under normal conditions! 
+ pub fn set_sysvar(&self, sysvar: &T) { + let bank_forks = self.bank_forks.read().unwrap(); + let bank = bank_forks.working_bank(); + bank.set_sysvar_for_tests(sysvar); + } + /// Force the working bank ahead to a new slot pub fn warp_to_slot(&mut self, warp_slot: Slot) -> Result<(), ProgramTestError> { let mut bank_forks = self.bank_forks.write().unwrap(); diff --git a/programs/address-lookup-table-tests/Cargo.toml b/programs/address-lookup-table-tests/Cargo.toml new file mode 100644 index 00000000000000..955f8edb94fdcc --- /dev/null +++ b/programs/address-lookup-table-tests/Cargo.toml @@ -0,0 +1,22 @@ +# This package only exists to avoid circular dependencies during cargo publish: +# solana-runtime -> solana-address-program-runtime -> solana-program-test -> solana-runtime + +[package] +name = "solana-address-lookup-table-program-tests" +version = "1.9.4" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +edition = "2021" +publish = false + +[dev-dependencies] +assert_matches = "1.5.0" +bincode = "1.3.3" +solana-address-lookup-table-program = { path = "../address-lookup-table", version = "=1.9.4" } +solana-program-test = { path = "../../program-test", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs new file mode 100644 index 00000000000000..3fd6225502d918 --- /dev/null +++ b/programs/address-lookup-table-tests/tests/close_lookup_table_ix.rs @@ -0,0 +1,216 @@ +use { + assert_matches::assert_matches, + common::{ + add_lookup_table_account, assert_ix_error, new_address_lookup_table, + overwrite_slot_hashes_with_slots, setup_test_context, + }, + solana_address_lookup_table_program::instruction::close_lookup_table, + 
solana_program_test::*, + solana_sdk::{ + clock::Clock, + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, + }, +}; + +mod common; + +#[tokio::test] +async fn test_close_lookup_table() { + let mut context = setup_test_context().await; + overwrite_slot_hashes_with_slots(&mut context, &[]); + + let lookup_table_address = Pubkey::new_unique(); + let authority_keypair = Keypair::new(); + let initialized_table = { + let mut table = new_address_lookup_table(Some(authority_keypair.pubkey()), 0); + table.meta.deactivation_slot = 0; + table + }; + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let transaction = Transaction::new_signed_with_payer( + &[close_lookup_table( + lookup_table_address, + authority_keypair.pubkey(), + context.payer.pubkey(), + )], + Some(&payer.pubkey()), + &[payer, &authority_keypair], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + assert!(client + .get_account(lookup_table_address) + .await + .unwrap() + .is_none()); +} + +#[tokio::test] +async fn test_close_lookup_table_not_deactivated() { + let mut context = setup_test_context().await; + + let authority_keypair = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority_keypair.pubkey()), 0); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = close_lookup_table( + lookup_table_address, + authority_keypair.pubkey(), + context.payer.pubkey(), + ); + + // The ix should fail because the table hasn't been deactivated yet + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn 
test_close_lookup_table_deactivated_in_current_slot() { + let mut context = setup_test_context().await; + + let clock = context.banks_client.get_sysvar::().await.unwrap(); + let authority_keypair = Keypair::new(); + let initialized_table = { + let mut table = new_address_lookup_table(Some(authority_keypair.pubkey()), 0); + table.meta.deactivation_slot = clock.slot; + table + }; + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = close_lookup_table( + lookup_table_address, + authority_keypair.pubkey(), + context.payer.pubkey(), + ); + + // Context sets up the slot hashes sysvar to have an entry + // for slot 0 which is when the table was deactivated. + // Because that slot is present, the ix should fail. + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn test_close_lookup_table_recently_deactivated() { + let mut context = setup_test_context().await; + + let authority_keypair = Keypair::new(); + let initialized_table = { + let mut table = new_address_lookup_table(Some(authority_keypair.pubkey()), 0); + table.meta.deactivation_slot = 0; + table + }; + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = close_lookup_table( + lookup_table_address, + authority_keypair.pubkey(), + context.payer.pubkey(), + ); + + // Context sets up the slot hashes sysvar to have an entry + // for slot 0 which is when the table was deactivated. + // Because that slot is present, the ix should fail. 
+ assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn test_close_immutable_lookup_table() { + let mut context = setup_test_context().await; + + let initialized_table = new_address_lookup_table(None, 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let authority = Keypair::new(); + let ix = close_lookup_table( + lookup_table_address, + authority.pubkey(), + Pubkey::new_unique(), + ); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::Immutable, + ) + .await; +} + +#[tokio::test] +async fn test_close_lookup_table_with_wrong_authority() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let wrong_authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = close_lookup_table( + lookup_table_address, + wrong_authority.pubkey(), + Pubkey::new_unique(), + ); + + assert_ix_error( + &mut context, + ix, + Some(&wrong_authority), + InstructionError::IncorrectAuthority, + ) + .await; +} + +#[tokio::test] +async fn test_close_lookup_table_without_signing() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let mut ix = close_lookup_table( + lookup_table_address, + authority.pubkey(), + Pubkey::new_unique(), + ); + ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} 
diff --git a/programs/address-lookup-table-tests/tests/common.rs b/programs/address-lookup-table-tests/tests/common.rs new file mode 100644 index 00000000000000..a29fd6010f6174 --- /dev/null +++ b/programs/address-lookup-table-tests/tests/common.rs @@ -0,0 +1,103 @@ +#![allow(dead_code)] +use { + solana_address_lookup_table_program::{ + id, + processor::process_instruction, + state::{AddressLookupTable, LookupTableMeta}, + }, + solana_program_test::*, + solana_sdk::{ + account::AccountSharedData, + clock::Slot, + hash::Hash, + instruction::Instruction, + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + slot_hashes::SlotHashes, + transaction::{Transaction, TransactionError}, + }, + std::borrow::Cow, +}; + +pub async fn setup_test_context() -> ProgramTestContext { + let program_test = ProgramTest::new("", id(), Some(process_instruction)); + program_test.start_with_context().await +} + +pub async fn assert_ix_error( + context: &mut ProgramTestContext, + ix: Instruction, + authority_keypair: Option<&Keypair>, + expected_err: InstructionError, +) { + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + + let mut signers = vec![payer]; + if let Some(authority) = authority_keypair { + signers.push(authority); + } + + let transaction = Transaction::new_signed_with_payer( + &[ix], + Some(&payer.pubkey()), + &signers, + recent_blockhash, + ); + + assert_eq!( + client + .process_transaction(transaction) + .await + .unwrap_err() + .unwrap(), + TransactionError::InstructionError(0, expected_err), + ); +} + +pub fn new_address_lookup_table( + authority: Option, + num_addresses: usize, +) -> AddressLookupTable<'static> { + let mut addresses = Vec::with_capacity(num_addresses); + addresses.resize_with(num_addresses, Pubkey::new_unique); + AddressLookupTable { + meta: LookupTableMeta { + authority, + ..LookupTableMeta::default() + }, + addresses: Cow::Owned(addresses), + } +} + 
+pub async fn add_lookup_table_account( + context: &mut ProgramTestContext, + account_address: Pubkey, + address_lookup_table: AddressLookupTable<'static>, +) -> AccountSharedData { + let mut data = Vec::new(); + address_lookup_table.serialize_for_tests(&mut data).unwrap(); + + let rent = context.banks_client.get_rent().await.unwrap(); + let rent_exempt_balance = rent.minimum_balance(data.len()); + + let mut account = AccountSharedData::new( + rent_exempt_balance, + data.len(), + &solana_address_lookup_table_program::id(), + ); + account.set_data(data); + context.set_account(&account_address, &account); + + account +} + +pub fn overwrite_slot_hashes_with_slots(context: &mut ProgramTestContext, slots: &[Slot]) { + let mut slot_hashes = SlotHashes::default(); + for slot in slots { + slot_hashes.add(*slot, Hash::new_unique()); + } + context.set_sysvar(&slot_hashes); +} diff --git a/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs new file mode 100644 index 00000000000000..b91a318028e1df --- /dev/null +++ b/programs/address-lookup-table-tests/tests/create_lookup_table_ix.rs @@ -0,0 +1,158 @@ +use { + assert_matches::assert_matches, + common::{assert_ix_error, overwrite_slot_hashes_with_slots, setup_test_context}, + solana_address_lookup_table_program::{ + id, + instruction::create_lookup_table, + state::{AddressLookupTable, LOOKUP_TABLE_META_SIZE}, + }, + solana_program_test::*, + solana_sdk::{ + clock::Slot, instruction::InstructionError, pubkey::Pubkey, rent::Rent, signature::Signer, + signer::keypair::Keypair, transaction::Transaction, + }, +}; + +mod common; + +#[tokio::test] +async fn test_create_lookup_table() { + let mut context = setup_test_context().await; + + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = 
context.last_blockhash; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + let (create_lookup_table_ix, lookup_table_address) = + create_lookup_table(authority_address, payer.pubkey(), test_recent_slot); + + // First create should succeed + { + let transaction = Transaction::new_signed_with_payer( + &[create_lookup_table_ix.clone()], + Some(&payer.pubkey()), + &[payer, &authority_keypair], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + let lookup_table_account = client + .get_account(lookup_table_address) + .await + .unwrap() + .unwrap(); + assert_eq!(lookup_table_account.owner, crate::id()); + assert_eq!(lookup_table_account.data.len(), LOOKUP_TABLE_META_SIZE); + assert_eq!( + lookup_table_account.lamports, + Rent::default().minimum_balance(LOOKUP_TABLE_META_SIZE) + ); + let lookup_table = AddressLookupTable::deserialize(&lookup_table_account.data).unwrap(); + assert_eq!(lookup_table.meta.deactivation_slot, Slot::MAX); + assert_eq!(lookup_table.meta.authority, Some(authority_address)); + assert_eq!(lookup_table.meta.last_extended_slot, 0); + assert_eq!(lookup_table.meta.last_extended_slot_start_index, 0); + assert_eq!(lookup_table.addresses.len(), 0); + } + + // Second create should fail + { + context.last_blockhash = client + .get_new_latest_blockhash(&recent_blockhash) + .await + .unwrap(); + assert_ix_error( + &mut context, + create_lookup_table_ix, + Some(&authority_keypair), + InstructionError::AccountAlreadyInitialized, + ) + .await; + } +} + +#[tokio::test] +async fn test_create_lookup_table_use_payer_as_authority() { + let mut context = setup_test_context().await; + + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let authority_address = payer.pubkey(); + let transaction = 
Transaction::new_signed_with_payer( + &[create_lookup_table(authority_address, payer.pubkey(), test_recent_slot).0], + Some(&payer.pubkey()), + &[payer], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); +} + +#[tokio::test] +async fn test_create_lookup_table_without_signer() { + let mut context = setup_test_context().await; + let unsigned_authority_address = Pubkey::new_unique(); + + let mut ix = create_lookup_table( + unsigned_authority_address, + context.payer.pubkey(), + Slot::MAX, + ) + .0; + ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} + +#[tokio::test] +async fn test_create_lookup_table_not_recent_slot() { + let mut context = setup_test_context().await; + let payer = &context.payer; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + + let ix = create_lookup_table(authority_address, payer.pubkey(), Slot::MAX).0; + + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidInstructionData, + ) + .await; +} + +#[tokio::test] +async fn test_create_lookup_table_pda_mismatch() { + let mut context = setup_test_context().await; + let test_recent_slot = 123; + overwrite_slot_hashes_with_slots(&mut context, &[test_recent_slot]); + let payer = &context.payer; + let authority_keypair = Keypair::new(); + let authority_address = authority_keypair.pubkey(); + + let mut ix = create_lookup_table(authority_address, payer.pubkey(), test_recent_slot).0; + ix.accounts[0].pubkey = Pubkey::new_unique(); + + assert_ix_error( + &mut context, + ix, + Some(&authority_keypair), + InstructionError::InvalidArgument, + ) + .await; +} diff --git a/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs new file mode 100644 index 00000000000000..81050aca123150 --- 
/dev/null +++ b/programs/address-lookup-table-tests/tests/deactivate_lookup_table_ix.rs @@ -0,0 +1,145 @@ +use { + assert_matches::assert_matches, + common::{ + add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, + }, + solana_address_lookup_table_program::{ + instruction::deactivate_lookup_table, state::AddressLookupTable, + }, + solana_program_test::*, + solana_sdk::{ + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, + }, +}; + +mod common; + +#[tokio::test] +async fn test_deactivate_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let mut initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account( + &mut context, + lookup_table_address, + initialized_table.clone(), + ) + .await; + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let transaction = Transaction::new_signed_with_payer( + &[deactivate_lookup_table( + lookup_table_address, + authority.pubkey(), + )], + Some(&payer.pubkey()), + &[payer, &authority], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + let table_account = client + .get_account(lookup_table_address) + .await + .unwrap() + .unwrap(); + let lookup_table = AddressLookupTable::deserialize(&table_account.data).unwrap(); + assert_eq!(lookup_table.meta.deactivation_slot, 1); + + // Check that only the deactivation slot changed + initialized_table.meta.deactivation_slot = 1; + assert_eq!(initialized_table, lookup_table); +} + +#[tokio::test] +async fn test_deactivate_immutable_lookup_table() { + let mut context = setup_test_context().await; + + let initialized_table = new_address_lookup_table(None, 10); + let lookup_table_address = Pubkey::new_unique(); + 
add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let authority = Keypair::new(); + let ix = deactivate_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::Immutable, + ) + .await; +} + +#[tokio::test] +async fn test_deactivate_already_deactivated() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = { + let mut table = new_address_lookup_table(Some(authority.pubkey()), 0); + table.meta.deactivation_slot = 0; + table + }; + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = deactivate_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn test_deactivate_lookup_table_with_wrong_authority() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let wrong_authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = deactivate_lookup_table(lookup_table_address, wrong_authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&wrong_authority), + InstructionError::IncorrectAuthority, + ) + .await; +} + +#[tokio::test] +async fn test_deactivate_lookup_table_without_signing() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let mut ix = 
deactivate_lookup_table(lookup_table_address, authority.pubkey()); + ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} diff --git a/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs new file mode 100644 index 00000000000000..576895a0c44a44 --- /dev/null +++ b/programs/address-lookup-table-tests/tests/extend_lookup_table_ix.rs @@ -0,0 +1,268 @@ +use { + assert_matches::assert_matches, + common::{ + add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, + }, + solana_address_lookup_table_program::{ + instruction::extend_lookup_table, + state::{AddressLookupTable, LookupTableMeta}, + }, + solana_program_test::*, + solana_sdk::{ + account::ReadableAccount, + instruction::Instruction, + instruction::InstructionError, + pubkey::{Pubkey, PUBKEY_BYTES}, + signature::{Keypair, Signer}, + transaction::{Transaction, TransactionError}, + }, + std::borrow::Cow, + std::result::Result, +}; + +mod common; + +struct ExpectedTableAccount { + lamports: u64, + data_len: usize, + state: AddressLookupTable<'static>, +} + +struct TestCase<'a> { + lookup_table_address: Pubkey, + instruction: Instruction, + extra_signer: Option<&'a Keypair>, + expected_result: Result, +} + +async fn run_test_case(context: &mut ProgramTestContext, test_case: TestCase<'_>) { + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + + let mut signers = vec![payer]; + if let Some(extra_signer) = test_case.extra_signer { + signers.push(extra_signer); + } + + let transaction = Transaction::new_signed_with_payer( + &[test_case.instruction], + Some(&payer.pubkey()), + &signers, + recent_blockhash, + ); + + let process_result = client.process_transaction(transaction).await; + + match test_case.expected_result { + Ok(expected_account) 
=> { + assert_matches!(process_result, Ok(())); + + let table_account = client + .get_account(test_case.lookup_table_address) + .await + .unwrap() + .unwrap(); + + let lookup_table = AddressLookupTable::deserialize(&table_account.data).unwrap(); + assert_eq!(lookup_table, expected_account.state); + assert_eq!(table_account.lamports(), expected_account.lamports); + assert_eq!(table_account.data().len(), expected_account.data_len); + } + Err(expected_err) => { + assert_eq!( + process_result.unwrap_err().unwrap(), + TransactionError::InstructionError(0, expected_err), + ); + } + } +} + +#[tokio::test] +async fn test_extend_lookup_table() { + let mut context = setup_test_context().await; + let authority = Keypair::new(); + let current_bank_slot = 1; + let rent = context.banks_client.get_rent().await.unwrap(); + + for extend_same_slot in [true, false] { + for (num_existing_addresses, num_new_addresses, expected_result) in [ + (0, 0, Err(InstructionError::InvalidInstructionData)), + (0, 1, Ok(())), + (0, 10, Ok(())), + (1, 1, Ok(())), + (1, 10, Ok(())), + (255, 1, Ok(())), + (255, 2, Err(InstructionError::InvalidInstructionData)), + (246, 10, Ok(())), + (256, 1, Err(InstructionError::InvalidArgument)), + ] { + let mut lookup_table = + new_address_lookup_table(Some(authority.pubkey()), num_existing_addresses); + if extend_same_slot { + lookup_table.meta.last_extended_slot = current_bank_slot; + } + + let lookup_table_address = Pubkey::new_unique(); + let lookup_table_account = + add_lookup_table_account(&mut context, lookup_table_address, lookup_table.clone()) + .await; + + let mut new_addresses = Vec::with_capacity(num_new_addresses); + new_addresses.resize_with(num_new_addresses, Pubkey::new_unique); + let instruction = extend_lookup_table( + lookup_table_address, + authority.pubkey(), + context.payer.pubkey(), + new_addresses.clone(), + ); + + let mut expected_addresses: Vec = lookup_table.addresses.to_vec(); + expected_addresses.extend(new_addresses); + + let 
expected_result = expected_result.map(|_| { + let expected_data_len = + lookup_table_account.data().len() + num_new_addresses * PUBKEY_BYTES; + let expected_lamports = rent.minimum_balance(expected_data_len); + let expected_lookup_table = AddressLookupTable { + meta: LookupTableMeta { + last_extended_slot: current_bank_slot, + last_extended_slot_start_index: if extend_same_slot { + 0u8 + } else { + num_existing_addresses as u8 + }, + deactivation_slot: lookup_table.meta.deactivation_slot, + authority: lookup_table.meta.authority, + _padding: 0u16, + }, + addresses: Cow::Owned(expected_addresses), + }; + ExpectedTableAccount { + lamports: expected_lamports, + data_len: expected_data_len, + state: expected_lookup_table, + } + }); + + let test_case = TestCase { + lookup_table_address, + instruction, + extra_signer: Some(&authority), + expected_result, + }; + + run_test_case(&mut context, test_case).await; + } + } +} + +#[tokio::test] +async fn test_extend_lookup_table_with_wrong_authority() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let wrong_authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 0); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let new_addresses = vec![Pubkey::new_unique()]; + let ix = extend_lookup_table( + lookup_table_address, + wrong_authority.pubkey(), + context.payer.pubkey(), + new_addresses, + ); + + assert_ix_error( + &mut context, + ix, + Some(&wrong_authority), + InstructionError::IncorrectAuthority, + ) + .await; +} + +#[tokio::test] +async fn test_extend_lookup_table_without_signing() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, 
lookup_table_address, initialized_table).await; + + let new_addresses = vec![Pubkey::new_unique()]; + let mut ix = extend_lookup_table( + lookup_table_address, + authority.pubkey(), + context.payer.pubkey(), + new_addresses, + ); + ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} + +#[tokio::test] +async fn test_extend_deactivated_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = { + let mut table = new_address_lookup_table(Some(authority.pubkey()), 0); + table.meta.deactivation_slot = 0; + table + }; + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let new_addresses = vec![Pubkey::new_unique()]; + let ix = extend_lookup_table( + lookup_table_address, + authority.pubkey(), + context.payer.pubkey(), + new_addresses, + ); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn test_extend_immutable_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(None, 1); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let new_addresses = vec![Pubkey::new_unique()]; + let ix = extend_lookup_table( + lookup_table_address, + authority.pubkey(), + context.payer.pubkey(), + new_addresses, + ); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::Immutable, + ) + .await; +} diff --git a/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs b/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs new file mode 100644 index 00000000000000..bb169fda293b0f --- /dev/null +++ 
b/programs/address-lookup-table-tests/tests/freeze_lookup_table_ix.rs @@ -0,0 +1,165 @@ +use { + assert_matches::assert_matches, + common::{ + add_lookup_table_account, assert_ix_error, new_address_lookup_table, setup_test_context, + }, + solana_address_lookup_table_program::{ + instruction::freeze_lookup_table, state::AddressLookupTable, + }, + solana_program_test::*, + solana_sdk::{ + instruction::InstructionError, + pubkey::Pubkey, + signature::{Keypair, Signer}, + transaction::Transaction, + }, +}; + +mod common; + +#[tokio::test] +async fn test_freeze_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let mut initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account( + &mut context, + lookup_table_address, + initialized_table.clone(), + ) + .await; + + let client = &mut context.banks_client; + let payer = &context.payer; + let recent_blockhash = context.last_blockhash; + let transaction = Transaction::new_signed_with_payer( + &[freeze_lookup_table( + lookup_table_address, + authority.pubkey(), + )], + Some(&payer.pubkey()), + &[payer, &authority], + recent_blockhash, + ); + + assert_matches!(client.process_transaction(transaction).await, Ok(())); + let table_account = client + .get_account(lookup_table_address) + .await + .unwrap() + .unwrap(); + let lookup_table = AddressLookupTable::deserialize(&table_account.data).unwrap(); + assert_eq!(lookup_table.meta.authority, None); + + // Check that only the authority changed + initialized_table.meta.authority = None; + assert_eq!(initialized_table, lookup_table); +} + +#[tokio::test] +async fn test_freeze_immutable_lookup_table() { + let mut context = setup_test_context().await; + + let initialized_table = new_address_lookup_table(None, 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, 
initialized_table).await; + + let authority = Keypair::new(); + let ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::Immutable, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_deactivated_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = { + let mut table = new_address_lookup_table(Some(authority.pubkey()), 10); + table.meta.deactivation_slot = 0; + table + }; + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::InvalidArgument, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_lookup_table_with_wrong_authority() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let wrong_authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = freeze_lookup_table(lookup_table_address, wrong_authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&wrong_authority), + InstructionError::IncorrectAuthority, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_lookup_table_without_signing() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 10); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let mut ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + 
ix.accounts[1].is_signer = false; + + assert_ix_error( + &mut context, + ix, + None, + InstructionError::MissingRequiredSignature, + ) + .await; +} + +#[tokio::test] +async fn test_freeze_empty_lookup_table() { + let mut context = setup_test_context().await; + + let authority = Keypair::new(); + let initialized_table = new_address_lookup_table(Some(authority.pubkey()), 0); + let lookup_table_address = Pubkey::new_unique(); + add_lookup_table_account(&mut context, lookup_table_address, initialized_table).await; + + let ix = freeze_lookup_table(lookup_table_address, authority.pubkey()); + + assert_ix_error( + &mut context, + ix, + Some(&authority), + InstructionError::InvalidInstructionData, + ) + .await; +} diff --git a/programs/address-lookup-table/Cargo.toml b/programs/address-lookup-table/Cargo.toml new file mode 100644 index 00000000000000..f1feab7f260435 --- /dev/null +++ b/programs/address-lookup-table/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "solana-address-lookup-table-program" +version = "1.9.4" +description = "Solana address lookup table program" +authors = ["Solana Maintainers "] +repository = "https://github.com/solana-labs/solana" +license = "Apache-2.0" +homepage = "https://solana.com/" +documentation = "https://docs.rs/solana-address-loookup-table-program" +edition = "2021" + +[dependencies] +bincode = "1.3.3" +bytemuck = "1.7.2" +log = "0.4.14" +num-derive = "0.3" +num-traits = "0.2" +serde = { version = "1.0.127", features = ["derive"] } +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.4" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } +thiserror = "1.0" + +[build-dependencies] +rustc_version = "0.4" + +[lib] +crate-type = ["lib"] +name = "solana_address_lookup_table_program" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git 
a/programs/address-lookup-table/build.rs b/programs/address-lookup-table/build.rs new file mode 120000 index 00000000000000..84539eddaa6ded --- /dev/null +++ b/programs/address-lookup-table/build.rs @@ -0,0 +1 @@ +../../frozen-abi/build.rs \ No newline at end of file diff --git a/programs/address-lookup-table/src/instruction.rs b/programs/address-lookup-table/src/instruction.rs new file mode 100644 index 00000000000000..5fca13b290e828 --- /dev/null +++ b/programs/address-lookup-table/src/instruction.rs @@ -0,0 +1,172 @@ +use { + crate::id, + serde::{Deserialize, Serialize}, + solana_sdk::{ + clock::Slot, + instruction::{AccountMeta, Instruction}, + pubkey::Pubkey, + system_program, + }, +}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] +pub enum ProgramInstruction { + /// Create an address lookup table + /// + /// # Account references + /// 0. `[WRITE]` Uninitialized address lookup table account + /// 1. `[SIGNER]` Account used to derive and control the new address lookup table. + /// 2. `[SIGNER, WRITE]` Account that will fund the new address lookup table. + /// 3. `[]` System program for CPI. + CreateLookupTable { + /// A recent slot must be used in the derivation path + /// for each initialized table. When closing table accounts, + /// the initialization slot must no longer be "recent" to prevent + /// address tables from being recreated with reordered or + /// otherwise malicious addresses. + recent_slot: Slot, + /// Address tables are always initialized at program-derived + /// addresses using the funding address, recent blockhash, and + /// the user-passed `bump_seed`. + bump_seed: u8, + }, + + /// Permanently freeze an address lookup table, making it immutable. + /// + /// # Account references + /// 0. `[WRITE]` Address lookup table account to freeze + /// 1. `[SIGNER]` Current authority + FreezeLookupTable, + + /// Extend an address lookup table with new addresses + /// + /// # Account references + /// 0. 
`[WRITE]` Address lookup table account to extend + /// 1. `[SIGNER]` Current authority + /// 2. `[SIGNER, WRITE]` Account that will fund the table reallocation + /// 3. `[]` System program for CPI. + ExtendLookupTable { new_addresses: Vec }, + + /// Deactivate an address lookup table, making it unusable and + /// eligible for closure after a short period of time. + /// + /// # Account references + /// 0. `[WRITE]` Address lookup table account to deactivate + /// 1. `[SIGNER]` Current authority + DeactivateLookupTable, + + /// Close an address lookup table account + /// + /// # Account references + /// 0. `[WRITE]` Address lookup table account to close + /// 1. `[SIGNER]` Current authority + /// 2. `[WRITE]` Recipient of closed account lamports + CloseLookupTable, +} + +/// Derives the address of an address table account from a wallet address and a recent block's slot. +pub fn derive_lookup_table_address( + authority_address: &Pubkey, + recent_block_slot: Slot, +) -> (Pubkey, u8) { + Pubkey::find_program_address( + &[authority_address.as_ref(), &recent_block_slot.to_le_bytes()], + &id(), + ) +} + +/// Constructs an instruction to create a table account and returns +/// the instruction and the table account's derived address. 
+pub fn create_lookup_table( + authority_address: Pubkey, + payer_address: Pubkey, + recent_slot: Slot, +) -> (Instruction, Pubkey) { + let (lookup_table_address, bump_seed) = + derive_lookup_table_address(&authority_address, recent_slot); + let instruction = Instruction::new_with_bincode( + id(), + &ProgramInstruction::CreateLookupTable { + recent_slot, + bump_seed, + }, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + AccountMeta::new(payer_address, true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ); + + (instruction, lookup_table_address) +} + +/// Constructs an instruction that freezes an address lookup +/// table so that it can never be closed or extended again. Empty +/// lookup tables cannot be frozen. +pub fn freeze_lookup_table(lookup_table_address: Pubkey, authority_address: Pubkey) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::FreezeLookupTable, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + ], + ) +} + +/// Constructs an instruction which extends an address lookup +/// table account with new addresses. +pub fn extend_lookup_table( + lookup_table_address: Pubkey, + authority_address: Pubkey, + payer_address: Pubkey, + new_addresses: Vec, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::ExtendLookupTable { new_addresses }, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + AccountMeta::new(payer_address, true), + AccountMeta::new_readonly(system_program::id(), false), + ], + ) +} + +/// Constructs an instruction that deactivates an address lookup +/// table so that it cannot be extended again and will be unusable +/// and eligible for closure after a short amount of time. 
+pub fn deactivate_lookup_table( + lookup_table_address: Pubkey, + authority_address: Pubkey, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::DeactivateLookupTable, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + ], + ) +} + +/// Returns an instruction that closes an address lookup table +/// account. The account will be deallocated and the lamports +/// will be drained to the recipient address. +pub fn close_lookup_table( + lookup_table_address: Pubkey, + authority_address: Pubkey, + recipient_address: Pubkey, +) -> Instruction { + Instruction::new_with_bincode( + id(), + &ProgramInstruction::CloseLookupTable, + vec![ + AccountMeta::new(lookup_table_address, false), + AccountMeta::new_readonly(authority_address, true), + AccountMeta::new(recipient_address, false), + ], + ) +} diff --git a/programs/address-lookup-table/src/lib.rs b/programs/address-lookup-table/src/lib.rs new file mode 100644 index 00000000000000..11433e64cabd0c --- /dev/null +++ b/programs/address-lookup-table/src/lib.rs @@ -0,0 +1,11 @@ +#![allow(incomplete_features)] +#![cfg_attr(RUSTC_WITH_SPECIALIZATION, feature(specialization))] +#![cfg_attr(RUSTC_NEEDS_PROC_MACRO_HYGIENE, feature(proc_macro_hygiene))] + +use solana_sdk::declare_id; + +pub mod instruction; +pub mod processor; +pub mod state; + +declare_id!("AddressLookupTab1e1111111111111111111111111"); diff --git a/programs/address-lookup-table/src/processor.rs b/programs/address-lookup-table/src/processor.rs new file mode 100644 index 00000000000000..11c7b57b4814ec --- /dev/null +++ b/programs/address-lookup-table/src/processor.rs @@ -0,0 +1,455 @@ +use { + crate::{ + instruction::ProgramInstruction, + state::{ + AddressLookupTable, LookupTableMeta, LookupTableStatus, ProgramState, + LOOKUP_TABLE_MAX_ADDRESSES, LOOKUP_TABLE_META_SIZE, + }, + }, + solana_program_runtime::{ic_msg, invoke_context::InvokeContext}, + solana_sdk::{ + 
account::{ReadableAccount, WritableAccount}, + account_utils::State, + clock::Slot, + instruction::InstructionError, + keyed_account::keyed_account_at_index, + program_utils::limited_deserialize, + pubkey::{Pubkey, PUBKEY_BYTES}, + slot_hashes::SlotHashes, + system_instruction, + sysvar::{ + clock::{self, Clock}, + rent::{self, Rent}, + slot_hashes, + }, + }, + std::convert::TryFrom, +}; + +pub fn process_instruction( + first_instruction_account: usize, + instruction_data: &[u8], + invoke_context: &mut InvokeContext, +) -> Result<(), InstructionError> { + match limited_deserialize(instruction_data)? { + ProgramInstruction::CreateLookupTable { + recent_slot, + bump_seed, + } => Processor::create_lookup_table( + invoke_context, + first_instruction_account, + recent_slot, + bump_seed, + ), + ProgramInstruction::FreezeLookupTable => { + Processor::freeze_lookup_table(invoke_context, first_instruction_account) + } + ProgramInstruction::ExtendLookupTable { new_addresses } => { + Processor::extend_lookup_table(invoke_context, first_instruction_account, new_addresses) + } + ProgramInstruction::DeactivateLookupTable => { + Processor::deactivate_lookup_table(invoke_context, first_instruction_account) + } + ProgramInstruction::CloseLookupTable => { + Processor::close_lookup_table(invoke_context, first_instruction_account) + } + } +} + +fn checked_add(a: usize, b: usize) -> Result { + a.checked_add(b).ok_or(InstructionError::ArithmeticOverflow) +} + +pub struct Processor; +impl Processor { + fn create_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + untrusted_recent_slot: Slot, + bump_seed: u8, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.data_len()? 
> 0 { + ic_msg!(invoke_context, "Table account must not be allocated"); + return Err(InstructionError::AccountAlreadyInitialized); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + let authority_key = *authority_account.signer_key().ok_or_else(|| { + ic_msg!(invoke_context, "Authority account must be a signer"); + InstructionError::MissingRequiredSignature + })?; + + let payer_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 2)?)?; + let payer_key = *payer_account.signer_key().ok_or_else(|| { + ic_msg!(invoke_context, "Payer account must be a signer"); + InstructionError::MissingRequiredSignature + })?; + + let derivation_slot = { + let slot_hashes: SlotHashes = invoke_context.get_sysvar(&slot_hashes::id())?; + if slot_hashes.get(&untrusted_recent_slot).is_some() { + Ok(untrusted_recent_slot) + } else { + ic_msg!( + invoke_context, + "{} is not a recent slot", + untrusted_recent_slot + ); + Err(InstructionError::InvalidInstructionData) + } + }?; + + // Use a derived address to ensure that an address table can never be + // initialized more than once at the same address. 
+ let derived_table_key = Pubkey::create_program_address( + &[ + authority_key.as_ref(), + &derivation_slot.to_le_bytes(), + &[bump_seed], + ], + &crate::id(), + )?; + + let table_key = *lookup_table_account.unsigned_key(); + if table_key != derived_table_key { + ic_msg!( + invoke_context, + "Table address must match derived address: {}", + derived_table_key + ); + return Err(InstructionError::InvalidArgument); + } + + let table_account_data_len = LOOKUP_TABLE_META_SIZE; + let rent: Rent = invoke_context.get_sysvar(&rent::id())?; + let required_lamports = rent + .minimum_balance(table_account_data_len) + .max(1) + .saturating_sub(lookup_table_account.lamports()?); + + if required_lamports > 0 { + invoke_context.native_invoke( + system_instruction::transfer(&payer_key, &table_key, required_lamports), + &[payer_key], + )?; + } + + invoke_context.native_invoke( + system_instruction::allocate(&table_key, table_account_data_len as u64), + &[table_key], + )?; + + invoke_context.native_invoke( + system_instruction::assign(&table_key, &crate::id()), + &[table_key], + )?; + + let keyed_accounts = invoke_context.get_keyed_accounts()?; + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + lookup_table_account.set_state(&ProgramState::LookupTable(LookupTableMeta::new( + authority_key, + )))?; + + Ok(()) + } + + fn freeze_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? 
!= crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + ic_msg!(invoke_context, "Lookup table is already frozen"); + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + if lookup_table.meta.deactivation_slot != Slot::MAX { + ic_msg!(invoke_context, "Deactivated tables cannot be frozen"); + return Err(InstructionError::InvalidArgument); + } + if lookup_table.addresses.is_empty() { + ic_msg!(invoke_context, "Empty lookup tables cannot be frozen"); + return Err(InstructionError::InvalidInstructionData); + } + + let mut lookup_table_meta = lookup_table.meta; + drop(lookup_table_account_ref); + + lookup_table_meta.authority = None; + AddressLookupTable::overwrite_meta_data( + lookup_table_account + .try_account_ref_mut()? + .data_as_mut_slice(), + lookup_table_meta, + )?; + + Ok(()) + } + + fn extend_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + new_addresses: Vec, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? 
!= crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let payer_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 2)?)?; + let payer_key = if let Some(payer_key) = payer_account.signer_key() { + *payer_key + } else { + ic_msg!(invoke_context, "Payer account must be a signer"); + return Err(InstructionError::MissingRequiredSignature); + }; + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let mut lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + if lookup_table.meta.deactivation_slot != Slot::MAX { + ic_msg!(invoke_context, "Deactivated tables cannot be extended"); + return Err(InstructionError::InvalidArgument); + } + if lookup_table.addresses.len() >= LOOKUP_TABLE_MAX_ADDRESSES { + ic_msg!( + invoke_context, + "Lookup table is full and cannot contain more addresses" + ); + return Err(InstructionError::InvalidArgument); + } + + if new_addresses.is_empty() { + ic_msg!(invoke_context, "Must extend with at least one address"); + return Err(InstructionError::InvalidInstructionData); + } + + let new_table_addresses_len = lookup_table + .addresses + .len() + .saturating_add(new_addresses.len()); + if new_table_addresses_len > LOOKUP_TABLE_MAX_ADDRESSES { + ic_msg!( + invoke_context, + "Extended lookup table length {} would exceed max capacity of {}", + new_table_addresses_len, + LOOKUP_TABLE_MAX_ADDRESSES + ); + return 
Err(InstructionError::InvalidInstructionData); + } + + let clock: Clock = invoke_context.get_sysvar(&clock::id())?; + if clock.slot != lookup_table.meta.last_extended_slot { + lookup_table.meta.last_extended_slot = clock.slot; + lookup_table.meta.last_extended_slot_start_index = + u8::try_from(lookup_table.addresses.len()).map_err(|_| { + // This is impossible as long as the length of new_addresses + // is non-zero and LOOKUP_TABLE_MAX_ADDRESSES == u8::MAX + 1. + InstructionError::InvalidAccountData + })?; + } + + let lookup_table_meta = lookup_table.meta; + drop(lookup_table_account_ref); + + let new_table_data_len = checked_add( + LOOKUP_TABLE_META_SIZE, + new_table_addresses_len.saturating_mul(PUBKEY_BYTES), + )?; + + { + let mut lookup_table_account_ref_mut = lookup_table_account.try_account_ref_mut()?; + AddressLookupTable::overwrite_meta_data( + lookup_table_account_ref_mut.data_as_mut_slice(), + lookup_table_meta, + )?; + + let table_data = lookup_table_account_ref_mut.data_mut(); + for new_address in new_addresses { + table_data.extend_from_slice(new_address.as_ref()); + } + } + + let rent: Rent = invoke_context.get_sysvar(&rent::id())?; + let required_lamports = rent + .minimum_balance(new_table_data_len) + .max(1) + .saturating_sub(lookup_table_account.lamports()?); + + let table_key = *lookup_table_account.unsigned_key(); + if required_lamports > 0 { + invoke_context.native_invoke( + system_instruction::transfer(&payer_key, &table_key, required_lamports), + &[payer_key], + )?; + } + + Ok(()) + } + + fn deactivate_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? 
!= crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + ic_msg!(invoke_context, "Lookup table is frozen"); + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + if lookup_table.meta.deactivation_slot != Slot::MAX { + ic_msg!(invoke_context, "Lookup table is already deactivated"); + return Err(InstructionError::InvalidArgument); + } + + let mut lookup_table_meta = lookup_table.meta; + drop(lookup_table_account_ref); + + let clock: Clock = invoke_context.get_sysvar(&clock::id())?; + lookup_table_meta.deactivation_slot = clock.slot; + + AddressLookupTable::overwrite_meta_data( + lookup_table_account + .try_account_ref_mut()? + .data_as_mut_slice(), + lookup_table_meta, + )?; + + Ok(()) + } + + fn close_lookup_table( + invoke_context: &mut InvokeContext, + first_instruction_account: usize, + ) -> Result<(), InstructionError> { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + + let lookup_table_account = + keyed_account_at_index(keyed_accounts, first_instruction_account)?; + if lookup_table_account.owner()? 
!= crate::id() { + return Err(InstructionError::InvalidAccountOwner); + } + + let authority_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 1)?)?; + if authority_account.signer_key().is_none() { + return Err(InstructionError::MissingRequiredSignature); + } + + let recipient_account = + keyed_account_at_index(keyed_accounts, checked_add(first_instruction_account, 2)?)?; + if recipient_account.unsigned_key() == lookup_table_account.unsigned_key() { + ic_msg!( + invoke_context, + "Lookup table cannot be the recipient of reclaimed lamports" + ); + return Err(InstructionError::InvalidArgument); + } + + let lookup_table_account_ref = lookup_table_account.try_account_ref()?; + let lookup_table_data = lookup_table_account_ref.data(); + let lookup_table = AddressLookupTable::deserialize(lookup_table_data)?; + + if lookup_table.meta.authority.is_none() { + ic_msg!(invoke_context, "Lookup table is frozen"); + return Err(InstructionError::Immutable); + } + if lookup_table.meta.authority != Some(*authority_account.unsigned_key()) { + return Err(InstructionError::IncorrectAuthority); + } + + let clock: Clock = invoke_context.get_sysvar(&clock::id())?; + let slot_hashes: SlotHashes = invoke_context.get_sysvar(&slot_hashes::id())?; + + match lookup_table.meta.status(clock.slot, &slot_hashes) { + LookupTableStatus::Activated => { + ic_msg!(invoke_context, "Lookup table is not deactivated"); + Err(InstructionError::InvalidArgument) + } + LookupTableStatus::Deactivating { remaining_blocks } => { + ic_msg!( + invoke_context, + "Table cannot be closed until it's fully deactivated in {} blocks", + remaining_blocks + ); + Err(InstructionError::InvalidArgument) + } + LookupTableStatus::Deactivated => Ok(()), + }?; + + drop(lookup_table_account_ref); + + let withdrawn_lamports = lookup_table_account.lamports()?; + recipient_account + .try_account_ref_mut()? 
+ .checked_add_lamports(withdrawn_lamports)?; + + let mut lookup_table_account = lookup_table_account.try_account_ref_mut()?; + lookup_table_account.set_data(Vec::new()); + lookup_table_account.set_lamports(0); + + Ok(()) + } +} diff --git a/programs/address-lookup-table/src/state.rs b/programs/address-lookup-table/src/state.rs new file mode 100644 index 00000000000000..8bf7fc3457a8ed --- /dev/null +++ b/programs/address-lookup-table/src/state.rs @@ -0,0 +1,325 @@ +use { + serde::{Deserialize, Serialize}, + solana_frozen_abi_macro::{AbiEnumVisitor, AbiExample}, + solana_sdk::{ + clock::Slot, + instruction::InstructionError, + pubkey::Pubkey, + slot_hashes::{SlotHashes, MAX_ENTRIES}, + }, + std::borrow::Cow, +}; + +/// The maximum number of addresses that a lookup table can hold +pub const LOOKUP_TABLE_MAX_ADDRESSES: usize = 256; + +/// The serialized size of lookup table metadata +pub const LOOKUP_TABLE_META_SIZE: usize = 56; + +/// Program account states +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, AbiExample, AbiEnumVisitor)] +#[allow(clippy::large_enum_variant)] +pub enum ProgramState { + /// Account is not initialized. + Uninitialized, + /// Initialized `LookupTable` account. + LookupTable(LookupTableMeta), +} + +/// Activation status of a lookup table +#[derive(Debug, PartialEq, Clone)] +pub enum LookupTableStatus { + Activated, + Deactivating { remaining_blocks: usize }, + Deactivated, +} + +/// Address lookup table metadata +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone, AbiExample)] +pub struct LookupTableMeta { + /// Lookup tables cannot be closed until the deactivation slot is + /// no longer "recent" (not accessible in the `SlotHashes` sysvar). + pub deactivation_slot: Slot, + /// The slot that the table was last extended. Address tables may + /// only be used to lookup addresses that were extended before + /// the current bank's slot. 
+ pub last_extended_slot: Slot, + /// The start index where the table was last extended from during + /// the `last_extended_slot`. + pub last_extended_slot_start_index: u8, + /// Authority address which must sign for each modification. + pub authority: Option, + // Padding to keep addresses 8-byte aligned + pub _padding: u16, + // Raw list of addresses follows this serialized structure in + // the account's data, starting from `LOOKUP_TABLE_META_SIZE`. +} + +impl Default for LookupTableMeta { + fn default() -> Self { + Self { + deactivation_slot: Slot::MAX, + last_extended_slot: 0, + last_extended_slot_start_index: 0, + authority: None, + _padding: 0, + } + } +} + +impl LookupTableMeta { + pub fn new(authority: Pubkey) -> Self { + LookupTableMeta { + authority: Some(authority), + ..LookupTableMeta::default() + } + } + + /// Returns whether the table is considered active for address lookups + pub fn is_active(&self, current_slot: Slot, slot_hashes: &SlotHashes) -> bool { + match self.status(current_slot, slot_hashes) { + LookupTableStatus::Activated => true, + LookupTableStatus::Deactivating { .. } => true, + LookupTableStatus::Deactivated => false, + } + } + + /// Return the current status of the lookup table + pub fn status(&self, current_slot: Slot, slot_hashes: &SlotHashes) -> LookupTableStatus { + if self.deactivation_slot == Slot::MAX { + LookupTableStatus::Activated + } else if self.deactivation_slot == current_slot { + LookupTableStatus::Deactivating { + remaining_blocks: MAX_ENTRIES.saturating_add(1), + } + } else if let Some(slot_hash_position) = slot_hashes.position(&self.deactivation_slot) { + // Deactivation requires a cool-down period to give in-flight transactions + // enough time to land and to remove indeterminism caused by transactions loading + // addresses in the same slot when a table is closed. The cool-down period is + // equivalent to the amount of time it takes for a slot to be removed from the + // slot hash list. 
+ // + // By using the slot hash to enforce the cool-down, there is a side effect + // of not allowing lookup tables to be recreated at the same derived address + // because tables must be created at an address derived from a recent slot. + LookupTableStatus::Deactivating { + remaining_blocks: MAX_ENTRIES.saturating_sub(slot_hash_position), + } + } else { + LookupTableStatus::Deactivated + } + } +} + +#[derive(Debug, PartialEq, Clone, AbiExample)] +pub struct AddressLookupTable<'a> { + pub meta: LookupTableMeta, + pub addresses: Cow<'a, [Pubkey]>, +} + +impl<'a> AddressLookupTable<'a> { + /// Serialize an address table's updated meta data and zero + /// any leftover bytes. + pub fn overwrite_meta_data( + data: &mut [u8], + lookup_table_meta: LookupTableMeta, + ) -> Result<(), InstructionError> { + let meta_data = data + .get_mut(0..LOOKUP_TABLE_META_SIZE) + .ok_or(InstructionError::InvalidAccountData)?; + meta_data.fill(0); + bincode::serialize_into(meta_data, &ProgramState::LookupTable(lookup_table_meta)) + .map_err(|_| InstructionError::GenericError)?; + Ok(()) + } + + /// Serialize an address table including its addresses + pub fn serialize_for_tests(self, data: &mut Vec) -> Result<(), InstructionError> { + data.resize(LOOKUP_TABLE_META_SIZE, 0); + Self::overwrite_meta_data(data, self.meta)?; + self.addresses.iter().for_each(|address| { + data.extend_from_slice(address.as_ref()); + }); + Ok(()) + } + + /// Efficiently deserialize an address table without allocating + /// for stored addresses. 
+ pub fn deserialize(data: &'a [u8]) -> Result, InstructionError> { + let program_state: ProgramState = + bincode::deserialize(data).map_err(|_| InstructionError::InvalidAccountData)?; + + let meta = match program_state { + ProgramState::LookupTable(meta) => Ok(meta), + ProgramState::Uninitialized => Err(InstructionError::UninitializedAccount), + }?; + + let raw_addresses_data = data.get(LOOKUP_TABLE_META_SIZE..).ok_or({ + // Should be impossible because table accounts must + // always be LOOKUP_TABLE_META_SIZE in length + InstructionError::InvalidAccountData + })?; + let addresses: &[Pubkey] = bytemuck::try_cast_slice(raw_addresses_data).map_err(|_| { + // Should be impossible because raw address data + // should be aligned and sized in multiples of 32 bytes + InstructionError::InvalidAccountData + })?; + + Ok(Self { + meta, + addresses: Cow::Borrowed(addresses), + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use solana_sdk::hash::Hash; + + impl AddressLookupTable<'_> { + fn new_for_tests(meta: LookupTableMeta, num_addresses: usize) -> Self { + let mut addresses = Vec::with_capacity(num_addresses); + addresses.resize_with(num_addresses, Pubkey::new_unique); + AddressLookupTable { + meta, + addresses: Cow::Owned(addresses), + } + } + } + + impl LookupTableMeta { + fn new_for_tests() -> Self { + Self { + authority: Some(Pubkey::new_unique()), + ..LookupTableMeta::default() + } + } + } + + #[test] + fn test_lookup_table_meta_size() { + let lookup_table = ProgramState::LookupTable(LookupTableMeta::new_for_tests()); + let meta_size = bincode::serialized_size(&lookup_table).unwrap(); + assert!(meta_size as usize <= LOOKUP_TABLE_META_SIZE); + assert_eq!(meta_size as usize, 56); + + let lookup_table = ProgramState::LookupTable(LookupTableMeta::default()); + let meta_size = bincode::serialized_size(&lookup_table).unwrap(); + assert!(meta_size as usize <= LOOKUP_TABLE_META_SIZE); + assert_eq!(meta_size as usize, 24); + } + + #[test] + fn 
test_lookup_table_meta_status() { + let mut slot_hashes = SlotHashes::default(); + for slot in 1..=MAX_ENTRIES as Slot { + slot_hashes.add(slot, Hash::new_unique()); + } + + let most_recent_slot = slot_hashes.first().unwrap().0; + let least_recent_slot = slot_hashes.last().unwrap().0; + assert!(least_recent_slot < most_recent_slot); + + // 10 was chosen because the current slot isn't necessarily the next + // slot after the most recent block + let current_slot = most_recent_slot + 10; + + let active_table = LookupTableMeta { + deactivation_slot: Slot::MAX, + ..LookupTableMeta::default() + }; + + let just_started_deactivating_table = LookupTableMeta { + deactivation_slot: current_slot, + ..LookupTableMeta::default() + }; + + let recently_started_deactivating_table = LookupTableMeta { + deactivation_slot: most_recent_slot, + ..LookupTableMeta::default() + }; + + let almost_deactivated_table = LookupTableMeta { + deactivation_slot: least_recent_slot, + ..LookupTableMeta::default() + }; + + let deactivated_table = LookupTableMeta { + deactivation_slot: least_recent_slot - 1, + ..LookupTableMeta::default() + }; + + assert_eq!( + active_table.status(current_slot, &slot_hashes), + LookupTableStatus::Activated + ); + assert_eq!( + just_started_deactivating_table.status(current_slot, &slot_hashes), + LookupTableStatus::Deactivating { + remaining_blocks: MAX_ENTRIES.saturating_add(1), + } + ); + assert_eq!( + recently_started_deactivating_table.status(current_slot, &slot_hashes), + LookupTableStatus::Deactivating { + remaining_blocks: MAX_ENTRIES, + } + ); + assert_eq!( + almost_deactivated_table.status(current_slot, &slot_hashes), + LookupTableStatus::Deactivating { + remaining_blocks: 1, + } + ); + assert_eq!( + deactivated_table.status(current_slot, &slot_hashes), + LookupTableStatus::Deactivated + ); + } + + #[test] + fn test_overwrite_meta_data() { + let meta = LookupTableMeta::new_for_tests(); + let empty_table = ProgramState::LookupTable(meta.clone()); + let mut 
serialized_table_1 = bincode::serialize(&empty_table).unwrap(); + serialized_table_1.resize(LOOKUP_TABLE_META_SIZE, 0); + + let address_table = AddressLookupTable::new_for_tests(meta, 0); + let mut serialized_table_2 = Vec::new(); + serialized_table_2.resize(LOOKUP_TABLE_META_SIZE, 0); + AddressLookupTable::overwrite_meta_data(&mut serialized_table_2, address_table.meta) + .unwrap(); + + assert_eq!(serialized_table_1, serialized_table_2); + } + + #[test] + fn test_deserialize() { + assert_eq!( + AddressLookupTable::deserialize(&[]).err(), + Some(InstructionError::InvalidAccountData), + ); + + assert_eq!( + AddressLookupTable::deserialize(&[0u8; LOOKUP_TABLE_META_SIZE]).err(), + Some(InstructionError::UninitializedAccount), + ); + + fn test_case(num_addresses: usize) { + let lookup_table_meta = LookupTableMeta::new_for_tests(); + let address_table = AddressLookupTable::new_for_tests(lookup_table_meta, num_addresses); + let mut address_table_data = Vec::new(); + AddressLookupTable::serialize_for_tests(address_table.clone(), &mut address_table_data) + .unwrap(); + assert_eq!( + AddressLookupTable::deserialize(&address_table_data).unwrap(), + address_table, + ); + } + + for case in [0, 1, 10, 255, 256] { + test_case(case); + } + } +} diff --git a/programs/bpf/Cargo.lock b/programs/bpf/Cargo.lock index 0cf4523d3556a7..35790b0e2bf698 100644 --- a/programs/bpf/Cargo.lock +++ b/programs/bpf/Cargo.lock @@ -449,6 +449,26 @@ dependencies = [ "winapi", ] +[[package]] +name = "console_error_panic_hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" +dependencies = [ + "cfg-if 1.0.0", + "wasm-bindgen", +] + +[[package]] +name = "console_log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501a375961cef1a0d44767200e66e4a559283097e91d0730b1d75dfb2f8a1494" +dependencies = [ + "log", + "web-sys", +] + [[package]] name 
= "constant_time_eq" version = "0.1.5" @@ -1004,7 +1024,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite", "pin-utils", "slab", ] @@ -1244,7 +1264,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.7", + "pin-project-lite", "socket2", "tokio", "tower-service", @@ -1350,9 +1370,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.49" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc15e39392125075f60c95ba416f5381ff6c3a948ff02ab12464715adf56c821" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -1700,11 +1720,12 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "opentelemetry" -version = "0.13.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91cea1dfd50064e52db033179952d18c770cbc5dfefc8eba45d619357ba3914" +checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" dependencies = [ "async-trait", + "crossbeam-channel", "futures", "js-sys", "lazy_static", @@ -1807,12 +1828,6 @@ dependencies = [ "syn 1.0.67", ] -[[package]] -name = "pin-project-lite" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7505eeebd78492e0f6108f7171c4948dbb120ee8119d9d77d0afa5469bef67f" - [[package]] name = "pin-project-lite" version = "0.2.7" @@ -2109,7 +2124,7 @@ dependencies = [ "log", "mime", "percent-encoding", - "pin-project-lite 0.2.7", + "pin-project-lite", "rustls 0.19.0", "serde", "serde_json", @@ -2442,7 +2457,7 @@ dependencies = [ [[package]] name = "solana-account-decoder" -version = "1.9.0" +version = "1.9.4" dependencies = [ "Inflector", "base64 0.12.3", @@ -2461,23 +2476,42 @@ dependencies = [ "zstd", ] +[[package]] +name = "solana-address-lookup-table-program" +version = "1.9.4" +dependencies 
= [ + "bincode", + "bytemuck", + "log", + "num-derive", + "num-traits", + "rustc_version 0.4.0", + "serde", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-program-runtime", + "solana-sdk", + "thiserror", +] + [[package]] name = "solana-banks-client" -version = "1.9.0" +version = "1.9.4" dependencies = [ "borsh", "futures", "solana-banks-interface", - "solana-program 1.9.0", + "solana-program 1.9.4", "solana-sdk", "tarpc", + "thiserror", "tokio", "tokio-serde", ] [[package]] name = "solana-banks-interface" -version = "1.9.0" +version = "1.9.4" dependencies = [ "serde", "solana-sdk", @@ -2486,7 +2520,7 @@ dependencies = [ [[package]] name = "solana-banks-server" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "futures", @@ -2502,13 +2536,12 @@ dependencies = [ [[package]] name = "solana-bpf-loader-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "byteorder 1.4.3", "libsecp256k1 0.6.0", "log", - "regex", "solana-measure", "solana-program-runtime", "solana-sdk", @@ -2518,7 +2551,7 @@ dependencies = [ [[package]] name = "solana-bpf-programs" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "byteorder 1.4.3", @@ -2533,7 +2566,7 @@ dependencies = [ "solana-bpf-rust-realloc", "solana-bpf-rust-realloc-invoke", "solana-cli-output", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-program-runtime", "solana-runtime", @@ -2545,170 +2578,170 @@ dependencies = [ [[package]] name = "solana-bpf-rust-128bit" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-bpf-rust-128bit-dep", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-128bit-dep" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-alloc" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = 
"solana-bpf-rust-call-depth" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-caller-access" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-custom-heap" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-dep-crate" -version = "1.9.0" +version = "1.9.4" dependencies = [ "byteorder 1.4.3", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-deprecated-loader" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-dup-accounts" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-error-handling" -version = "1.9.0" +version = "1.9.4" dependencies = [ "num-derive", "num-traits", - "solana-program 1.9.0", + "solana-program 1.9.4", "thiserror", ] [[package]] name = "solana-bpf-rust-external-spend" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-finalize" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-instruction-introspection" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-invoke" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-bpf-rust-invoked", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-invoke-and-error" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] 
name = "solana-bpf-rust-invoke-and-ok" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-invoke-and-return" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-invoked" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-iter" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-log-data" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-many-args" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-bpf-rust-many-args-dep", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-many-args-dep" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-mem" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", "solana-program-runtime", "solana-program-test", "solana-sdk", @@ -2716,84 +2749,84 @@ dependencies = [ [[package]] name = "solana-bpf-rust-membuiltins" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-bpf-rust-mem", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-noop" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-panic" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-param-passing" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-bpf-rust-param-passing-dep", 
- "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-param-passing-dep" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-rand" -version = "1.9.0" +version = "1.9.4" dependencies = [ "getrandom 0.1.14", "rand 0.7.3", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-realloc" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-realloc-invoke" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-bpf-rust-realloc", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-ro-account_modify" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-ro-modify" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-sanity" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", "solana-program-runtime", "solana-program-test", "solana-sdk", @@ -2801,38 +2834,38 @@ dependencies = [ [[package]] name = "solana-bpf-rust-secp256k1-recover" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-sha" -version = "1.9.0" +version = "1.9.4" dependencies = [ "blake3 1.2.0", - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-spoof1" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-spoof1-system" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = 
"solana-bpf-rust-sysvar" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", "solana-program-runtime", "solana-program-test", "solana-sdk", @@ -2840,28 +2873,28 @@ dependencies = [ [[package]] name = "solana-bpf-rust-upgradeable" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bpf-rust-upgraded" -version = "1.9.0" +version = "1.9.4" dependencies = [ - "solana-program 1.9.0", + "solana-program 1.9.4", ] [[package]] name = "solana-bucket-map" -version = "1.9.0" +version = "1.9.4" dependencies = [ "fs_extra", "log", "memmap2 0.5.0", "rand 0.7.3", "rayon", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-measure", "solana-sdk", "tempfile", @@ -2869,7 +2902,7 @@ dependencies = [ [[package]] name = "solana-clap-utils" -version = "1.9.0" +version = "1.9.4" dependencies = [ "chrono", "clap", @@ -2885,7 +2918,7 @@ dependencies = [ [[package]] name = "solana-cli-config" -version = "1.9.0" +version = "1.9.4" dependencies = [ "dirs-next", "lazy_static", @@ -2897,7 +2930,7 @@ dependencies = [ [[package]] name = "solana-cli-output" -version = "1.9.0" +version = "1.9.4" dependencies = [ "Inflector", "base64 0.13.0", @@ -2919,7 +2952,7 @@ dependencies = [ [[package]] name = "solana-client" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.13.0", "bincode", @@ -2951,7 +2984,7 @@ dependencies = [ [[package]] name = "solana-compute-budget-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "solana-program-runtime", "solana-sdk", @@ -2959,7 +2992,7 @@ dependencies = [ [[package]] name = "solana-config-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "chrono", @@ -2971,7 +3004,7 @@ dependencies = [ [[package]] name = "solana-faucet" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "byteorder 1.4.3", @@ -2981,7 +3014,7 @@ dependencies = [ "serde_derive", 
"solana-clap-utils", "solana-cli-config", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-sdk", "solana-version", @@ -3012,7 +3045,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "bv", @@ -3023,8 +3056,8 @@ dependencies = [ "serde", "serde_derive", "sha2", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "thiserror", ] @@ -3042,7 +3075,7 @@ dependencies = [ [[package]] name = "solana-frozen-abi-macro" -version = "1.9.0" +version = "1.9.4" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.6", @@ -3063,7 +3096,7 @@ dependencies = [ [[package]] name = "solana-logger" -version = "1.9.0" +version = "1.9.4" dependencies = [ "env_logger 0.9.0", "lazy_static", @@ -3072,7 +3105,7 @@ dependencies = [ [[package]] name = "solana-measure" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log", "solana-sdk", @@ -3080,7 +3113,7 @@ dependencies = [ [[package]] name = "solana-metrics" -version = "1.9.0" +version = "1.9.4" dependencies = [ "env_logger 0.9.0", "gethostname", @@ -3092,7 +3125,7 @@ dependencies = [ [[package]] name = "solana-net-utils" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "clap", @@ -3102,7 +3135,7 @@ dependencies = [ "serde", "serde_derive", "socket2", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-sdk", "solana-version", "tokio", @@ -3111,7 +3144,7 @@ dependencies = [ [[package]] name = "solana-perf" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "caps", @@ -3125,7 +3158,7 @@ dependencies = [ "rand 0.7.3", "rayon", "serde", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-rayon-threadlimit", "solana-sdk", @@ -3169,7 +3202,7 @@ dependencies = [ [[package]] name = "solana-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.13.0", "bincode", @@ -3180,8 +3213,12 @@ dependencies = [ 
"bs58 0.4.0", "bv", "bytemuck", + "console_error_panic_hook", + "console_log", "curve25519-dalek 3.2.0", + "getrandom 0.1.14", "itertools 0.10.1", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log", @@ -3196,16 +3233,17 @@ dependencies = [ "serde_derive", "sha2", "sha3", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", - "solana-sdk-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", + "solana-sdk-macro 1.9.4", "thiserror", + "wasm-bindgen", ] [[package]] name = "solana-program-runtime" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base64 0.13.0", "bincode", @@ -3217,14 +3255,14 @@ dependencies = [ "num-traits", "rustc_version 0.4.0", "serde", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-sdk", "thiserror", ] [[package]] name = "solana-program-test" -version = "1.9.0" +version = "1.9.4" dependencies = [ "async-trait", "base64 0.12.3", @@ -3235,7 +3273,7 @@ dependencies = [ "solana-banks-client", "solana-banks-server", "solana-bpf-loader-program", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-program-runtime", "solana-runtime", "solana-sdk", @@ -3246,7 +3284,7 @@ dependencies = [ [[package]] name = "solana-rayon-threadlimit" -version = "1.9.0" +version = "1.9.4" dependencies = [ "lazy_static", "num_cpus", @@ -3254,7 +3292,7 @@ dependencies = [ [[package]] name = "solana-remote-wallet" -version = "1.9.0" +version = "1.9.4" dependencies = [ "base32", "console", @@ -3273,7 +3311,7 @@ dependencies = [ [[package]] name = "solana-runtime" -version = "1.9.0" +version = "1.9.4" dependencies = [ "arrayref", "bincode", @@ -3290,6 +3328,8 @@ dependencies = [ "lazy_static", "log", "memmap2 0.5.0", + "num-derive", + "num-traits", "num_cpus", "ouroboros", "rand 0.7.3", @@ -3298,12 +3338,13 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", + "solana-address-lookup-table-program", "solana-bucket-map", "solana-compute-budget-program", 
"solana-config-program", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "solana-measure", "solana-metrics", "solana-program-runtime", @@ -3320,11 +3361,12 @@ dependencies = [ [[package]] name = "solana-sdk" -version = "1.9.0" +version = "1.9.4" dependencies = [ "assert_matches", "base64 0.13.0", "bincode", + "bitflags", "borsh", "bs58 0.4.0", "bytemuck", @@ -3337,6 +3379,7 @@ dependencies = [ "generic-array 0.14.4", "hmac 0.11.0", "itertools 0.10.1", + "js-sys", "lazy_static", "libsecp256k1 0.6.0", "log", @@ -3355,13 +3398,14 @@ dependencies = [ "serde_json", "sha2", "sha3", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", - "solana-program 1.9.0", - "solana-sdk-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", + "solana-program 1.9.4", + "solana-sdk-macro 1.9.4", "thiserror", "uriparse", + "wasm-bindgen", ] [[package]] @@ -3379,7 +3423,7 @@ dependencies = [ [[package]] name = "solana-sdk-macro" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bs58 0.4.0", "proc-macro2 1.0.24", @@ -3390,10 +3434,10 @@ dependencies = [ [[package]] name = "solana-send-transaction-service" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log", - "solana-logger 1.9.0", + "solana-logger 1.9.4", "solana-metrics", "solana-runtime", "solana-sdk", @@ -3401,7 +3445,7 @@ dependencies = [ [[package]] name = "solana-stake-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "log", @@ -3411,8 +3455,8 @@ dependencies = [ "serde", "serde_derive", "solana-config-program", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", "solana-metrics", "solana-program-runtime", "solana-sdk", @@ -3422,7 +3466,7 @@ dependencies = [ [[package]] name = "solana-transaction-status" 
-version = "1.9.0" +version = "1.9.4" dependencies = [ "Inflector", "base64 0.12.3", @@ -3447,20 +3491,20 @@ dependencies = [ [[package]] name = "solana-version" -version = "1.9.0" +version = "1.9.4" dependencies = [ "log", "rustc_version 0.4.0", "serde", "serde_derive", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", "solana-sdk", ] [[package]] name = "solana-vote-program" -version = "1.9.0" +version = "1.9.4" dependencies = [ "bincode", "log", @@ -3469,9 +3513,9 @@ dependencies = [ "rustc_version 0.4.0", "serde", "serde_derive", - "solana-frozen-abi 1.9.0", - "solana-frozen-abi-macro 1.9.0", - "solana-logger 1.9.0", + "solana-frozen-abi 1.9.4", + "solana-frozen-abi-macro 1.9.4", + "solana-logger 1.9.4", "solana-metrics", "solana-program-runtime", "solana-sdk", @@ -3480,9 +3524,9 @@ dependencies = [ [[package]] name = "solana_rbpf" -version = "0.2.16" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3af7860a2bf51e63a07c4098966b1c80e8cbfdab3cf4ac36aac7fdd80ea1094c" +checksum = "8fb565d026461ba89d1d92cc36cf0882fba44076559c3bbed1e8a9888112b3d7" dependencies = [ "byteorder 1.4.3", "combine", @@ -3613,9 +3657,9 @@ dependencies = [ [[package]] name = "tarpc" -version = "0.26.2" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82cb992a07637db1bcc0e4511d0c58c3f3a03f509d7c6cc2826f7646deac2032" +checksum = "b85d0a9369a919ba0db919b142a2b704cd207dfc676f7a43c2d105d0bc225487" dependencies = [ "anyhow", "fnv", @@ -3637,9 +3681,9 @@ dependencies = [ [[package]] name = "tarpc-plugins" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea80818e6c75f81d961d7426c1b938cbea6b3a51533b5ee71b61f82166b7ef3d" +checksum = "0ee42b4e559f17bce0385ebf511a7beb67d5cc33c12c96b7f4e9789919d9c10f" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.6", @@ -3775,7 
+3819,7 @@ dependencies = [ "num_cpus", "once_cell", "parking_lot", - "pin-project-lite 0.2.7", + "pin-project-lite", "signal-hook-registry", "tokio-macros", "winapi", @@ -3816,6 +3860,7 @@ dependencies = [ "futures-sink", "pin-project", "serde", + "serde_json", ] [[package]] @@ -3825,7 +3870,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", - "pin-project-lite 0.2.7", + "pin-project-lite", "tokio", ] @@ -3839,7 +3884,7 @@ dependencies = [ "futures-core", "futures-sink", "log", - "pin-project-lite 0.2.7", + "pin-project-lite", "slab", "tokio", ] @@ -3861,22 +3906,22 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.21" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "log", - "pin-project-lite 0.1.5", + "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.6", @@ -3885,18 +3930,18 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] [[package]] name = "tracing-opentelemetry" 
-version = "0.12.0" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99003208b647dae59dcefc49c98aecaa3512fbc29351685d4b9ef23a9218458e" +checksum = "599f388ecb26b28d9c1b2e4437ae019a7b336018b45ed911458cd9ebf91129f6" dependencies = [ "opentelemetry", "tracing", @@ -4081,9 +4126,9 @@ checksum = "93c6c3420963c5c64bca373b25e77acb562081b9bb4dd5bb864187742186cea9" [[package]] name = "wasm-bindgen" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe8f61dba8e5d645a4d8132dc7a0a66861ed5e1045d2c0ed940fab33bac0fbe" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4091,9 +4136,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046ceba58ff062da072c7cb4ba5b22a37f00a302483f7e2a6cdc18fedbdc1fd3" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", @@ -4118,9 +4163,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9aa01d36cda046f797c57959ff5f3c615c9cc63997a8d545831ec7976819b" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote 1.0.6", "wasm-bindgen-macro-support", @@ -4128,9 +4173,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96eb45c1b2ee33545a813a92dbb53856418bf7eb54ab34f7f7ff1448a5b3735d" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2 1.0.24", "quote 1.0.6", @@ -4141,9 +4186,9 @@ dependencies = [ 
[[package]] name = "wasm-bindgen-shared" -version = "0.2.72" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7148f4696fb4960a346eaa60bbfb42a1ac4ebba21f750f75fc1375b098d5ffa" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" diff --git a/programs/bpf/Cargo.toml b/programs/bpf/Cargo.toml index 60d56a869571be..4eaab415ba7687 100644 --- a/programs/bpf/Cargo.toml +++ b/programs/bpf/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-bpf-programs" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" documentation = "https://docs.rs/solana" homepage = "https://solana.com/" readme = "README.md" @@ -26,19 +26,19 @@ itertools = "0.10.1" log = "0.4.11" miow = "0.3.6" net2 = "0.2.37" -solana-bpf-rust-invoke = { path = "rust/invoke", version = "=1.9.0"} -solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.9.0"} -solana-bpf-rust-realloc = { path = "rust/realloc", version = "=1.9.0"} -solana-bpf-rust-realloc-invoke = { path = "rust/realloc_invoke", version = "=1.9.0"} -solana-cli-output = { path = "../../cli-output", version = "=1.9.0" } -solana-logger = { path = "../../logger", version = "=1.9.0" } -solana-measure = { path = "../../measure", version = "=1.9.0" } -solana_rbpf = "=0.2.16" -solana-runtime = { path = "../../runtime", version = "=1.9.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.9.0" } -solana-sdk = { path = "../../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../../transaction-status", version = "=1.9.0" } -solana-account-decoder = { path = "../../account-decoder", version = "=1.9.0" } +solana-bpf-rust-invoke = { path = "rust/invoke", version = "=1.9.4"} +solana-bpf-loader-program = { path = "../bpf_loader", version = "=1.9.4"} +solana-bpf-rust-realloc = { path = "rust/realloc", version = "=1.9.4"} +solana-bpf-rust-realloc-invoke = { path = 
"rust/realloc_invoke", version = "=1.9.4"} +solana-cli-output = { path = "../../cli-output", version = "=1.9.4" } +solana-logger = { path = "../../logger", version = "=1.9.4" } +solana-measure = { path = "../../measure", version = "=1.9.4" } +solana_rbpf = "=0.2.21" +solana-runtime = { path = "../../runtime", version = "=1.9.4" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../../transaction-status", version = "=1.9.4" } +solana-account-decoder = { path = "../../account-decoder", version = "=1.9.4" } [[bench]] name = "bpf_loader" diff --git a/programs/bpf/benches/bpf_loader.rs b/programs/bpf/benches/bpf_loader.rs index 97b9a1f647b97b..63513f6a77f1ef 100644 --- a/programs/bpf/benches/bpf_loader.rs +++ b/programs/bpf/benches/bpf_loader.rs @@ -110,7 +110,7 @@ fn bench_program_alu(bencher: &mut Bencher) { register_syscalls(invoke_context).unwrap(), ) .unwrap(); - executable.jit_compile().unwrap(); + Executable::::jit_compile(&mut executable).unwrap(); let compute_meter = invoke_context.get_compute_meter(); let mut instruction_meter = ThisInstructionMeter { compute_meter }; let mut vm = create_vm(&executable, &mut inner_iter, invoke_context, &[]).unwrap(); diff --git a/programs/bpf/rust/128bit/Cargo.toml b/programs/bpf/rust/128bit/Cargo.toml index 57bda245cbb3ae..e3f25156a71d80 100644 --- a/programs/bpf/rust/128bit/Cargo.toml +++ b/programs/bpf/rust/128bit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } -solana-bpf-rust-128bit-dep = { path = "../128bit_dep", 
version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } +solana-bpf-rust-128bit-dep = { path = "../128bit_dep", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/128bit_dep/Cargo.toml b/programs/bpf/rust/128bit_dep/Cargo.toml index a9f79dfc1a80a9..15571496779688 100644 --- a/programs/bpf/rust/128bit_dep/Cargo.toml +++ b/programs/bpf/rust/128bit_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-128bit-dep" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-128bit-dep" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/alloc/Cargo.toml b/programs/bpf/rust/alloc/Cargo.toml index 43a9b42b5d0ab8..9d8b52f323dba6 100644 --- a/programs/bpf/rust/alloc/Cargo.toml +++ b/programs/bpf/rust/alloc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-alloc" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-alloc" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/alloc/src/lib.rs b/programs/bpf/rust/alloc/src/lib.rs index 9f59d6aa9a97f8..5e98c4a13be465 100644 --- a/programs/bpf/rust/alloc/src/lib.rs +++ b/programs/bpf/rust/alloc/src/lib.rs @@ -2,7 +2,7 @@ #[macro_use] extern crate alloc; 
-use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64, msg}; use std::{alloc::Layout, mem}; #[no_mangle] @@ -46,7 +46,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for i in 0..ITERS { assert_eq!(*ptr.add(i as usize), i as u8); } - msg!(0x3, 0, 0, 0, u64::from(*ptr.add(42))); + sol_log_64(0x3, 0, 0, 0, u64::from(*ptr.add(42))); assert_eq!(*ptr.add(42), 42); alloc::alloc::dealloc(ptr, layout); } @@ -61,7 +61,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for v in ones.iter() { sum += ones[*v]; } - msg!(0x0, 0, 0, 0, sum as u64); + sol_log_64(0x0, 0, 0, 0, sum as u64); assert_eq!(sum, ITERS); } @@ -74,7 +74,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for i in 0..ITERS { v.push(i); } - msg!(0x4, 0, 0, 0, v.len() as u64); + sol_log_64(0x4, 0, 0, 0, v.len() as u64); assert_eq!(v.len(), ITERS); } diff --git a/programs/bpf/rust/call_depth/Cargo.toml b/programs/bpf/rust/call_depth/Cargo.toml index 4dcd41a3ec6b1c..47c4e2da9c604e 100644 --- a/programs/bpf/rust/call_depth/Cargo.toml +++ b/programs/bpf/rust/call_depth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-call-depth" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-call-depth" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/call_depth/src/lib.rs b/programs/bpf/rust/call_depth/src/lib.rs index 7ab2c69b58bf8e..888c491d98a2f6 100644 --- a/programs/bpf/rust/call_depth/src/lib.rs +++ b/programs/bpf/rust/call_depth/src/lib.rs @@ -1,6 +1,6 @@ //! 
Example Rust-based BPF program that tests call depth and stack usage -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64, msg}; #[inline(never)] pub fn recurse(data: &mut [u8]) { @@ -8,7 +8,7 @@ pub fn recurse(data: &mut [u8]) { return; } recurse(&mut data[1..]); - msg!(line!(), 0, 0, 0, data[0]); + sol_log_64(line!() as u64, 0, 0, 0, data[0] as u64); } /// # Safety @@ -17,7 +17,7 @@ pub fn recurse(data: &mut [u8]) { pub unsafe extern "C" fn entrypoint(input: *mut u8) -> u64 { msg!("Call depth"); let depth = *(input.add(16) as *mut u8); - msg!(line!(), 0, 0, 0, depth); + sol_log_64(line!() as u64, 0, 0, 0, depth as u64); let mut data = Vec::with_capacity(depth as usize); for i in 0_u8..depth { data.push(i); diff --git a/programs/bpf/rust/caller_access/Cargo.toml b/programs/bpf/rust/caller_access/Cargo.toml index 343d329ca0c7c4..21b587fa4b9f7f 100644 --- a/programs/bpf/rust/caller_access/Cargo.toml +++ b/programs/bpf/rust/caller_access/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-caller-access" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-caller-access" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/custom_heap/Cargo.toml b/programs/bpf/rust/custom_heap/Cargo.toml index 259b45101ac24d..73442963436f29 100644 --- a/programs/bpf/rust/custom_heap/Cargo.toml +++ b/programs/bpf/rust/custom_heap/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-custom-heap" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors 
= ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-custom-heap" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [features] default = ["custom-heap"] diff --git a/programs/bpf/rust/dep_crate/Cargo.toml b/programs/bpf/rust/dep_crate/Cargo.toml index e0a673a98d315b..f3e4f7092cdf3b 100644 --- a/programs/bpf/rust/dep_crate/Cargo.toml +++ b/programs/bpf/rust/dep_crate/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-dep-crate" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2021" [dependencies] byteorder = { version = "1", default-features = false } -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/deprecated_loader/Cargo.toml b/programs/bpf/rust/deprecated_loader/Cargo.toml index 4cbd1c3e0fc591..19c8716f741d06 100644 --- a/programs/bpf/rust/deprecated_loader/Cargo.toml +++ b/programs/bpf/rust/deprecated_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-deprecated-loader" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-deprecated-loader" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/dup_accounts/Cargo.toml 
b/programs/bpf/rust/dup_accounts/Cargo.toml index 1982b8e70e0f2d..a720b2613fc3a5 100644 --- a/programs/bpf/rust/dup_accounts/Cargo.toml +++ b/programs/bpf/rust/dup_accounts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-dup-accounts" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-dup-accounts" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/error_handling/Cargo.toml b/programs/bpf/rust/error_handling/Cargo.toml index fd8317a08f8b02..5af16de68f768d 100644 --- a/programs/bpf/rust/error_handling/Cargo.toml +++ b/programs/bpf/rust/error_handling/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-error-handling" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,7 +12,7 @@ edition = "2021" [dependencies] num-derive = "0.3" num-traits = "0.2" -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } thiserror = "1.0" [lib] diff --git a/programs/bpf/rust/external_spend/Cargo.toml b/programs/bpf/rust/external_spend/Cargo.toml index 09e959ee86f3a1..1b7b26146cbcad 100644 --- a/programs/bpf/rust/external_spend/Cargo.toml +++ b/programs/bpf/rust/external_spend/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-external-spend" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ 
documentation = "https://docs.rs/solana-bpf-rust-external-spend" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/finalize/Cargo.toml b/programs/bpf/rust/finalize/Cargo.toml index 6440dc9c448c46..bf1baca307f7b9 100644 --- a/programs/bpf/rust/finalize/Cargo.toml +++ b/programs/bpf/rust/finalize/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-finalize" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-finalize" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/instruction_introspection/Cargo.toml b/programs/bpf/rust/instruction_introspection/Cargo.toml index b74502eec18179..9ee8ba2ff4c0b5 100644 --- a/programs/bpf/rust/instruction_introspection/Cargo.toml +++ b/programs/bpf/rust/instruction_introspection/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-instruction-introspection" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-instruction-introspection" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/instruction_introspection/src/lib.rs 
b/programs/bpf/rust/instruction_introspection/src/lib.rs index f39c811f42057f..b1ac55d03bd5e7 100644 --- a/programs/bpf/rust/instruction_introspection/src/lib.rs +++ b/programs/bpf/rust/instruction_introspection/src/lib.rs @@ -2,7 +2,6 @@ extern crate solana_program; use solana_program::{ - account_info::next_account_info, account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, @@ -25,20 +24,19 @@ fn process_instruction( } let secp_instruction_index = instruction_data[0]; - let account_info_iter = &mut accounts.iter(); - let instruction_accounts = next_account_info(account_info_iter)?; - assert_eq!(*instruction_accounts.key, instructions::id()); - let data_len = instruction_accounts.try_borrow_data()?.len(); + let instructions_account = accounts.last().ok_or(ProgramError::NotEnoughAccountKeys)?; + assert_eq!(*instructions_account.key, instructions::id()); + let data_len = instructions_account.try_borrow_data()?.len(); if data_len < 2 { return Err(ProgramError::InvalidAccountData); } let instruction = instructions::load_instruction_at_checked( secp_instruction_index as usize, - instruction_accounts, + instructions_account, )?; - let current_instruction = instructions::load_current_index_checked(instruction_accounts)?; + let current_instruction = instructions::load_current_index_checked(instructions_account)?; let my_index = instruction_data[1] as u16; assert_eq!(current_instruction, my_index); @@ -56,7 +54,7 @@ fn process_instruction( &[instruction_data[0], instruction_data[1], 1], vec![AccountMeta::new_readonly(instructions::id(), false)], ), - &[instruction_accounts.clone()], + &[instructions_account.clone()], )?; } diff --git a/programs/bpf/rust/invoke/Cargo.toml b/programs/bpf/rust/invoke/Cargo.toml index 06532a846300d8..213be0b9505c7c 100644 --- a/programs/bpf/rust/invoke/Cargo.toml +++ b/programs/bpf/rust/invoke/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test 
program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -15,7 +15,7 @@ program = [] [dependencies] solana-bpf-rust-invoked = { path = "../invoked", default-features = false } -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/bpf/rust/invoke_and_error/Cargo.toml b/programs/bpf/rust/invoke_and_error/Cargo.toml index 98fba8d2de18eb..9f342d3fdce0da 100644 --- a/programs/bpf/rust/invoke_and_error/Cargo.toml +++ b/programs/bpf/rust/invoke_and_error/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-error" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-error" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_ok/Cargo.toml b/programs/bpf/rust/invoke_and_ok/Cargo.toml index a42a30fbe99344..2b6571fca42a7c 100644 --- a/programs/bpf/rust/invoke_and_ok/Cargo.toml +++ b/programs/bpf/rust/invoke_and_ok/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-ok" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-ok" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = 
["cdylib"] diff --git a/programs/bpf/rust/invoke_and_return/Cargo.toml b/programs/bpf/rust/invoke_and_return/Cargo.toml index 1d576d7ae66080..cdee95b88fc974 100644 --- a/programs/bpf/rust/invoke_and_return/Cargo.toml +++ b/programs/bpf/rust/invoke_and_return/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoke-and-return" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-invoke-and-return" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/invoke_and_return/src/lib.rs b/programs/bpf/rust/invoke_and_return/src/lib.rs index 9b4b81a6f408af..88fc249eb33e01 100644 --- a/programs/bpf/rust/invoke_and_return/src/lib.rs +++ b/programs/bpf/rust/invoke_and_return/src/lib.rs @@ -14,7 +14,6 @@ fn process_instruction( instruction_data: &[u8], ) -> ProgramResult { let to_call = accounts[0].key; - let infos = accounts; let instruction = Instruction { accounts: accounts[1..] .iter() @@ -27,5 +26,7 @@ fn process_instruction( data: instruction_data.to_owned(), program_id: *to_call, }; - invoke(&instruction, infos) + // program id account is not required for invocations if the + // program id is not one of the instruction account metas. 
+ invoke(&instruction, &accounts[1..]) } diff --git a/programs/bpf/rust/invoked/Cargo.toml b/programs/bpf/rust/invoked/Cargo.toml index 54c27a67d81369..d8d57ab05b2aa2 100644 --- a/programs/bpf/rust/invoked/Cargo.toml +++ b/programs/bpf/rust/invoked/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-invoked" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,7 +14,7 @@ default = ["program"] program = [] [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/bpf/rust/invoked/src/processor.rs b/programs/bpf/rust/invoked/src/processor.rs index 3b94a69c0c5667..5053203688f26d 100644 --- a/programs/bpf/rust/invoked/src/processor.rs +++ b/programs/bpf/rust/invoked/src/processor.rs @@ -7,6 +7,7 @@ use solana_program::{ account_info::AccountInfo, bpf_loader, entrypoint, entrypoint::{ProgramResult, MAX_PERMITTED_DATA_INCREASE}, + log::sol_log_64, msg, program::{get_return_data, invoke, invoke_signed, set_return_data}, program_error::ProgramError, @@ -105,7 +106,7 @@ fn process_instruction( assert!(accounts[INVOKED_PROGRAM_DUP_INDEX] .try_borrow_mut_data() .is_err()); - msg!(data[0], 0, 0, 0, 0); + sol_log_64(data[0] as u64, 0, 0, 0, 0); } } RETURN_OK => { diff --git a/programs/bpf/rust/iter/Cargo.toml b/programs/bpf/rust/iter/Cargo.toml index e51688332089fc..54cfa209110ff9 100644 --- a/programs/bpf/rust/iter/Cargo.toml +++ b/programs/bpf/rust/iter/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-iter" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-iter" 
edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/iter/src/lib.rs b/programs/bpf/rust/iter/src/lib.rs index 09ce81e5ef1c77..a262deb89f42f7 100644 --- a/programs/bpf/rust/iter/src/lib.rs +++ b/programs/bpf/rust/iter/src/lib.rs @@ -1,7 +1,7 @@ //! Example Rust-based BPF program tests loop iteration extern crate solana_program; -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64}; #[no_mangle] pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { @@ -12,7 +12,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { for v in ones.iter() { sum += *v; } - msg!(0xff, 0, 0, 0, sum); + sol_log_64(0xff, 0, 0, 0, sum); assert_eq!(sum, ITERS as u64); SUCCESS diff --git a/programs/bpf/rust/log_data/Cargo.toml b/programs/bpf/rust/log_data/Cargo.toml index c3f96a1fba2d90..58ba601577441d 100644 --- a/programs/bpf/rust/log_data/Cargo.toml +++ b/programs/bpf/rust/log_data/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-log-data" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-log-data" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [features] default = ["program"] diff --git a/programs/bpf/rust/many_args/Cargo.toml b/programs/bpf/rust/many_args/Cargo.toml index f548c446967fe2..052ed9d77129dd 100644 --- a/programs/bpf/rust/many_args/Cargo.toml +++ b/programs/bpf/rust/many_args/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-many-args" 
-version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } -solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } +solana-bpf-rust-many-args-dep = { path = "../many_args_dep", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/many_args_dep/Cargo.toml b/programs/bpf/rust/many_args_dep/Cargo.toml index 4a2ea5a431a18e..257c8450f9e01d 100644 --- a/programs/bpf/rust/many_args_dep/Cargo.toml +++ b/programs/bpf/rust/many_args_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-many-args-dep" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-many-args-dep" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/many_args_dep/src/lib.rs b/programs/bpf/rust/many_args_dep/src/lib.rs index 98d9279c14bdef..d43d6f9a301897 100644 --- a/programs/bpf/rust/many_args_dep/src/lib.rs +++ b/programs/bpf/rust/many_args_dep/src/lib.rs @@ -1,7 +1,7 @@ //! 
Solana Rust-based BPF program utility functions and types extern crate solana_program; -use solana_program::msg; +use solana_program::{log::sol_log_64, msg}; pub fn many_args( arg1: u64, @@ -15,8 +15,8 @@ pub fn many_args( arg9: u64, ) -> u64 { msg!("Another package - many_args"); - msg!(arg1, arg2, arg3, arg4, arg5); - msg!(arg6, arg7, arg8, arg9, 0); + sol_log_64(arg1, arg2, arg3, arg4, arg5); + sol_log_64(arg6, arg7, arg8, arg9, 0); arg1 + arg2 + arg3 + arg4 + arg5 + arg6 + arg7 + arg8 + arg9 } @@ -39,8 +39,8 @@ pub fn many_args_sret( arg9: u64, ) -> Ret { msg!("Another package - many_args_sret"); - msg!(arg1, arg2, arg3, arg4, arg5); - msg!(arg6, arg7, arg8, arg9, 0); + sol_log_64(arg1, arg2, arg3, arg4, arg5); + sol_log_64(arg6, arg7, arg8, arg9, 0); Ret { group1: u128::from(arg1) + u128::from(arg2) + u128::from(arg3), group2: u128::from(arg4) + u128::from(arg5) + u128::from(arg6), diff --git a/programs/bpf/rust/mem/Cargo.toml b/programs/bpf/rust/mem/Cargo.toml index 6319244248a7a2..405d30467376f9 100644 --- a/programs/bpf/rust/mem/Cargo.toml +++ b/programs/bpf/rust/mem/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-mem" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,12 +13,12 @@ edition = "2021" no-entrypoint = [] [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [dev-dependencies] -solana-program-runtime = { path = "../../../../program-runtime", version = "=1.9.0" } -solana-program-test = { path = "../../../../program-test", version = "=1.9.0" } -solana-sdk = { path = "../../../../sdk", version = "=1.9.0" } +solana-program-runtime = { path = "../../../../program-runtime", version = "=1.9.4" } +solana-program-test = { path = "../../../../program-test", version = "=1.9.4" } +solana-sdk = { path 
= "../../../../sdk", version = "=1.9.4" } [lib] crate-type = ["cdylib", "lib"] diff --git a/programs/bpf/rust/membuiltins/Cargo.toml b/programs/bpf/rust/membuiltins/Cargo.toml index c45ad212289743..ab1305653a4ce2 100644 --- a/programs/bpf/rust/membuiltins/Cargo.toml +++ b/programs/bpf/rust/membuiltins/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-membuiltins" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-mem" edition = "2021" [dependencies] -solana-bpf-rust-mem = { path = "../mem", version = "=1.9.0", features = [ "no-entrypoint" ] } -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-bpf-rust-mem = { path = "../mem", version = "=1.9.4", features = [ "no-entrypoint" ] } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/noop/Cargo.toml b/programs/bpf/rust/noop/Cargo.toml index bf34291b974bd0..0a746cb9c3e77c 100644 --- a/programs/bpf/rust/noop/Cargo.toml +++ b/programs/bpf/rust/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-noop" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-noop" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/panic/Cargo.toml b/programs/bpf/rust/panic/Cargo.toml index cddbc2080aacdb..ffde4680d4f9f5 100644 --- a/programs/bpf/rust/panic/Cargo.toml +++ b/programs/bpf/rust/panic/Cargo.toml @@ -1,6 +1,6 @@ [package] 
name = "solana-bpf-rust-panic" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-panic" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [features] default = ["custom-panic"] diff --git a/programs/bpf/rust/param_passing/Cargo.toml b/programs/bpf/rust/param_passing/Cargo.toml index 3fccdefd314f4a..2781b10ec175cf 100644 --- a/programs/bpf/rust/param_passing/Cargo.toml +++ b/programs/bpf/rust/param_passing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-param-passing" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } -solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } +solana-bpf-rust-param-passing-dep = { path = "../param_passing_dep", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/param_passing/src/lib.rs b/programs/bpf/rust/param_passing/src/lib.rs index fa81fe5d459246..f3c1c30ee334b1 100644 --- a/programs/bpf/rust/param_passing/src/lib.rs +++ b/programs/bpf/rust/param_passing/src/lib.rs @@ -2,7 +2,7 @@ extern crate solana_program; use solana_bpf_rust_param_passing_dep::{Data, TestDep}; -use solana_program::{custom_panic_default, entrypoint::SUCCESS, msg}; +use solana_program::{custom_panic_default, entrypoint::SUCCESS, log::sol_log_64}; #[no_mangle] pub extern "C" fn 
entrypoint(_input: *mut u8) -> u64 { @@ -17,7 +17,7 @@ pub extern "C" fn entrypoint(_input: *mut u8) -> u64 { }; let test_dep = TestDep::new(&data, 1, 2, 3, 4, 5); - msg!(0, 0, 0, 0, test_dep.thirty); + sol_log_64(0, 0, 0, 0, test_dep.thirty as u64); assert!(test_dep.thirty == 30); SUCCESS diff --git a/programs/bpf/rust/param_passing_dep/Cargo.toml b/programs/bpf/rust/param_passing_dep/Cargo.toml index 2cb1bd9142e996..a91d598357d474 100644 --- a/programs/bpf/rust/param_passing_dep/Cargo.toml +++ b/programs/bpf/rust/param_passing_dep/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-param-passing-dep" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-param-passing-dep" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/programs/bpf/rust/rand/Cargo.toml b/programs/bpf/rust/rand/Cargo.toml index 846dcea25cec94..1e7e8c682aa662 100644 --- a/programs/bpf/rust/rand/Cargo.toml +++ b/programs/bpf/rust/rand/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-rand" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,7 +12,7 @@ edition = "2021" [dependencies] getrandom = { version = "0.1.14", features = ["dummy"] } rand = "0.7" -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/realloc/Cargo.toml b/programs/bpf/rust/realloc/Cargo.toml index 6c21391b85f612..6c47c985289127 
100644 --- a/programs/bpf/rust/realloc/Cargo.toml +++ b/programs/bpf/rust/realloc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-realloc" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,7 +14,7 @@ default = ["program"] program = [] [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/bpf/rust/realloc_invoke/Cargo.toml b/programs/bpf/rust/realloc_invoke/Cargo.toml index 3cedef126c561e..32ac1fe8cf0369 100644 --- a/programs/bpf/rust/realloc_invoke/Cargo.toml +++ b/programs/bpf/rust/realloc_invoke/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-realloc-invoke" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,8 +14,8 @@ default = ["program"] program = [] [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } -solana-bpf-rust-realloc = { path = "../realloc", version = "=1.9.0", default-features = false } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } +solana-bpf-rust-realloc = { path = "../realloc", version = "=1.9.4", default-features = false } [lib] crate-type = ["lib", "cdylib"] diff --git a/programs/bpf/rust/ro_account_modify/Cargo.toml b/programs/bpf/rust/ro_account_modify/Cargo.toml index b512d7f917cc34..d51f99ca73c9b9 100644 --- a/programs/bpf/rust/ro_account_modify/Cargo.toml +++ b/programs/bpf/rust/ro_account_modify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-ro-account_modify" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = 
"https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-ro-modify" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/ro_modify/Cargo.toml b/programs/bpf/rust/ro_modify/Cargo.toml index 31d3c807e9a58c..67da81d17e513a 100644 --- a/programs/bpf/rust/ro_modify/Cargo.toml +++ b/programs/bpf/rust/ro_modify/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-ro-modify" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-ro-modify" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sanity/Cargo.toml b/programs/bpf/rust/sanity/Cargo.toml index 3ed7cb3a04ab1f..c1736665a87860 100644 --- a/programs/bpf/rust/sanity/Cargo.toml +++ b/programs/bpf/rust/sanity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sanity" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -13,12 +13,12 @@ edition = "2021" test-bpf = [] [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [dev-dependencies] -solana-program-runtime = { path = "../../../../program-runtime", version = "=1.9.0" } -solana-program-test = { path = "../../../../program-test", version = "=1.9.0" } -solana-sdk = { path = "../../../../sdk", version = 
"=1.9.0" } +solana-program-runtime = { path = "../../../../program-runtime", version = "=1.9.4" } +solana-program-test = { path = "../../../../program-test", version = "=1.9.4" } +solana-sdk = { path = "../../../../sdk", version = "=1.9.4" } [lib] crate-type = ["cdylib", "lib"] diff --git a/programs/bpf/rust/secp256k1_recover/Cargo.toml b/programs/bpf/rust/secp256k1_recover/Cargo.toml index b837039ddba8c9..49b71fe418495b 100644 --- a/programs/bpf/rust/secp256k1_recover/Cargo.toml +++ b/programs/bpf/rust/secp256k1_recover/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-secp256k1-recover" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-secp256k1-recover" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sha/Cargo.toml b/programs/bpf/rust/sha/Cargo.toml index 6c4b94616215f6..d05742b501f8f1 100644 --- a/programs/bpf/rust/sha/Cargo.toml +++ b/programs/bpf/rust/sha/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sha" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ edition = "2021" [dependencies] blake3 = "1.0.0" -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/spoof1/Cargo.toml b/programs/bpf/rust/spoof1/Cargo.toml index 31daadfdefeba4..74eeaf3cabe02b 100644 --- a/programs/bpf/rust/spoof1/Cargo.toml +++ b/programs/bpf/rust/spoof1/Cargo.toml @@ -1,6 +1,6 
@@ [package] name = "solana-bpf-rust-spoof1" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/spoof1_system/Cargo.toml b/programs/bpf/rust/spoof1_system/Cargo.toml index f5124d54e03c99..1b7e860a97796f 100644 --- a/programs/bpf/rust/spoof1_system/Cargo.toml +++ b/programs/bpf/rust/spoof1_system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-spoof1-system" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-spoof1-system" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/programs/bpf/rust/sysvar/Cargo.toml b/programs/bpf/rust/sysvar/Cargo.toml index 925b73689a4ccc..02d86256ecd81d 100644 --- a/programs/bpf/rust/sysvar/Cargo.toml +++ b/programs/bpf/rust/sysvar/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-sysvar" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,12 +10,12 @@ documentation = "https://docs.rs/solana-bpf-rust-sysvar" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = 
"=1.9.4" } [dev-dependencies] -solana-program-runtime = { path = "../../../../program-runtime", version = "=1.9.0" } -solana-program-test = { path = "../../../../program-test", version = "=1.9.0" } -solana-sdk = { path = "../../../../sdk", version = "=1.9.0" } +solana-program-runtime = { path = "../../../../program-runtime", version = "=1.9.4" } +solana-program-test = { path = "../../../../program-test", version = "=1.9.4" } +solana-sdk = { path = "../../../../sdk", version = "=1.9.4" } [lib] crate-type = ["cdylib", "lib"] diff --git a/programs/bpf/rust/upgradeable/Cargo.toml b/programs/bpf/rust/upgradeable/Cargo.toml index 0e896b59b0aaa8..c57d1dac9e028b 100644 --- a/programs/bpf/rust/upgradeable/Cargo.toml +++ b/programs/bpf/rust/upgradeable/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-upgradeable" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgradeable" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] name = "solana_bpf_rust_upgradeable" diff --git a/programs/bpf/rust/upgradeable/src/lib.rs b/programs/bpf/rust/upgradeable/src/lib.rs index 76a9f759b6c36a..2186ab274a28d0 100644 --- a/programs/bpf/rust/upgradeable/src/lib.rs +++ b/programs/bpf/rust/upgradeable/src/lib.rs @@ -8,13 +8,12 @@ use solana_program::{ entrypoint!(process_instruction); fn process_instruction( - program_id: &Pubkey, + _program_id: &Pubkey, accounts: &[AccountInfo], _instruction_data: &[u8], ) -> ProgramResult { msg!("Upgradeable program"); - assert_eq!(accounts.len(), 2); - assert_eq!(accounts[0].key, program_id); - assert_eq!(*accounts[1].key, clock::id()); + assert_eq!(accounts.len(), 1); + assert_eq!(*accounts[0].key, clock::id()); 
Err(42.into()) } diff --git a/programs/bpf/rust/upgraded/Cargo.toml b/programs/bpf/rust/upgraded/Cargo.toml index c84bbae95248c6..8dc79f33e1b291 100644 --- a/programs/bpf/rust/upgraded/Cargo.toml +++ b/programs/bpf/rust/upgraded/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-rust-upgraded" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ documentation = "https://docs.rs/solana-bpf-rust-upgraded" edition = "2021" [dependencies] -solana-program = { path = "../../../../sdk/program", version = "=1.9.0" } +solana-program = { path = "../../../../sdk/program", version = "=1.9.4" } [lib] name = "solana_bpf_rust_upgraded" diff --git a/programs/bpf/rust/upgraded/src/lib.rs b/programs/bpf/rust/upgraded/src/lib.rs index 5e5e63d0b07df4..152b58234f8300 100644 --- a/programs/bpf/rust/upgraded/src/lib.rs +++ b/programs/bpf/rust/upgraded/src/lib.rs @@ -8,13 +8,12 @@ use solana_program::{ entrypoint!(process_instruction); fn process_instruction( - program_id: &Pubkey, + _program_id: &Pubkey, accounts: &[AccountInfo], _instruction_data: &[u8], ) -> ProgramResult { msg!("Upgraded program"); - assert_eq!(accounts.len(), 2); - assert_eq!(accounts[0].key, program_id); - assert_eq!(*accounts[1].key, clock::id()); + assert_eq!(accounts.len(), 1); + assert_eq!(*accounts[0].key, clock::id()); Err(43.into()) } diff --git a/programs/bpf/tests/programs.rs b/programs/bpf/tests/programs.rs index 914f6487a3f460..1b64b2d9f095aa 100644 --- a/programs/bpf/tests/programs.rs +++ b/programs/bpf/tests/programs.rs @@ -17,7 +17,6 @@ use solana_bpf_loader_program::{ use solana_bpf_rust_invoke::instructions::*; use solana_bpf_rust_realloc::instructions::*; use solana_bpf_rust_realloc_invoke::instructions::*; -use solana_cli_output::display::println_transaction; use solana_program_runtime::invoke_context::with_mock_invoke_context; use solana_rbpf::{ 
elf::Executable, @@ -25,7 +24,10 @@ use solana_rbpf::{ vm::{Config, Tracer}, }; use solana_runtime::{ - bank::{Bank, ExecuteTimings, NonceInfo, TransactionBalancesSet, TransactionResults}, + bank::{ + Bank, DurableNonceFee, ExecuteTimings, TransactionBalancesSet, TransactionExecutionDetails, + TransactionExecutionResult, TransactionResults, + }, bank_client::BankClient, genesis_utils::{create_genesis_config, GenesisConfigInfo}, loader_utils::{ @@ -53,12 +55,9 @@ use solana_sdk::{ }; use solana_transaction_status::{ token_balances::collect_token_balances, ConfirmedTransaction, InnerInstructions, - TransactionStatusMeta, TransactionWithStatusMeta, UiTransactionEncoding, -}; -use std::{ - collections::HashMap, convert::TryFrom, env, fs::File, io::Read, path::PathBuf, str::FromStr, - sync::Arc, + TransactionStatusMeta, TransactionWithStatusMeta, }; +use std::{collections::HashMap, env, fs::File, io::Read, path::PathBuf, str::FromStr, sync::Arc}; /// BPF program file extension const PLATFORM_FILE_EXTENSION_BPF: &str = "so"; @@ -222,7 +221,7 @@ fn run_program(name: &str) -> u64 { register_syscalls(invoke_context).unwrap(), ) .unwrap(); - executable.jit_compile().unwrap(); + Executable::::jit_compile(&mut executable).unwrap(); let mut instruction_count = 0; let mut tracer = None; @@ -299,7 +298,7 @@ fn process_transaction_and_record_inner( let signature = tx.signatures.get(0).unwrap().clone(); let txs = vec![tx]; let tx_batch = bank.prepare_batch_for_tests(txs); - let (mut results, _, mut inner_instructions, _transaction_logs) = bank + let mut results = bank .load_execute_and_commit_transactions( &tx_batch, MAX_PROCESSING_AGE, @@ -307,20 +306,27 @@ fn process_transaction_and_record_inner( true, false, &mut ExecuteTimings::default(), - ); + ) + .0; let result = results .fee_collection_results .swap_remove(0) .and_then(|_| bank.get_signature_status(&signature).unwrap()); - ( - result, - inner_instructions - .swap_remove(0) - .expect("cpi recording should be enabled"), - ) + 
let inner_instructions = results + .execution_results + .swap_remove(0) + .details() + .expect("tx should be executed") + .clone() + .inner_instructions + .expect("cpi recording should be enabled"); + (result, inner_instructions) } -fn execute_transactions(bank: &Bank, txs: Vec) -> Vec { +fn execute_transactions( + bank: &Bank, + txs: Vec, +) -> Vec> { let batch = bank.prepare_batch_for_tests(txs.clone()); let mut timings = ExecuteTimings::default(); let mut mint_decimals = HashMap::new(); @@ -334,8 +340,6 @@ fn execute_transactions(bank: &Bank, txs: Vec) -> Vec) -> Vec { + let TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + durable_nonce_fee, + } = details; + + let lamports_per_signature = match durable_nonce_fee { + Some(DurableNonceFee::Valid(lamports_per_signature)) => { + Some(lamports_per_signature) + } + Some(DurableNonceFee::Invalid) => None, + None => bank.get_lamports_per_signature_for_blockhash( + &tx.message().recent_blockhash, + ), + } + .expect("lamports_per_signature must be available"); + let fee = Bank::get_fee_for_message_with_lamports_per_signature( + &SanitizedMessage::try_from(tx.message().clone()).unwrap(), + lamports_per_signature, + ); + + let inner_instructions = inner_instructions.map(|inner_instructions| { + inner_instructions + .into_iter() + .enumerate() + .map(|(index, instructions)| InnerInstructions { + index: index as u8, + instructions, + }) + .filter(|i| !i.instructions.is_empty()) + .collect() + }); + + let tx_status_meta = TransactionStatusMeta { + status, + fee, + pre_balances, + post_balances, + pre_token_balances: Some(pre_token_balances), + post_token_balances: Some(post_token_balances), + inner_instructions, + log_messages, + rewards: None, + }; + + Ok(ConfirmedTransaction { + slot: bank.slot(), + transaction: TransactionWithStatusMeta { + transaction: tx.clone(), + meta: Some(tx_status_meta), + }, + block_time: None, }) - .filter(|i| !i.instructions.is_empty()) - .collect() - }); - - let 
tx_status_meta = TransactionStatusMeta { - status: execute_result, - fee, - pre_balances, - post_balances, - pre_token_balances: Some(pre_token_balances), - post_token_balances: Some(post_token_balances), - inner_instructions, - log_messages, - rewards: None, - }; - - ConfirmedTransaction { - slot: bank.slot(), - transaction: TransactionWithStatusMeta { - transaction: tx.clone(), - meta: Some(tx_status_meta), - }, - block_time: None, + } + TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), } }, ) .collect() } -fn print_confirmed_tx(name: &str, confirmed_tx: ConfirmedTransaction) { - let block_time = confirmed_tx.block_time; - let tx = confirmed_tx.transaction.transaction.clone(); - let encoded = confirmed_tx.encode(UiTransactionEncoding::JsonParsed); - println!("EXECUTE {} (slot {})", name, encoded.slot); - println_transaction(&tx, &encoded.transaction.meta, " ", None, block_time); -} - #[test] #[cfg(any(feature = "bpf_c", feature = "bpf_rust"))] fn test_program_bpf_sanity() { @@ -1399,7 +1407,7 @@ fn assert_instruction_count() { ("solana_bpf_rust_param_passing", 146), ("solana_bpf_rust_rand", 488), ("solana_bpf_rust_sanity", 8455), - ("solana_bpf_rust_secp256k1_recover", 25216), + ("solana_bpf_rust_secp256k1_recover", 25624), ("solana_bpf_rust_sha", 30692), ]); } @@ -1449,7 +1457,10 @@ fn test_program_bpf_instruction_introspection() { ); // Passing transaction - let account_metas = vec![AccountMeta::new_readonly(sysvar::instructions::id(), false)]; + let account_metas = vec![ + AccountMeta::new_readonly(program_id, false), + AccountMeta::new_readonly(sysvar::instructions::id(), false), + ]; let instruction0 = Instruction::new_with_bytes(program_id, &[0u8, 0u8], account_metas.clone()); let instruction1 = Instruction::new_with_bytes(program_id, &[0u8, 1u8], account_metas.clone()); let instruction2 = Instruction::new_with_bytes(program_id, &[0u8, 2u8], account_metas); @@ -1736,14 +1747,8 @@ fn test_program_bpf_upgrade() { "solana_bpf_rust_upgradeable", 
); - let mut instruction = Instruction::new_with_bytes( - program_id, - &[0], - vec![ - AccountMeta::new(program_id.clone(), false), - AccountMeta::new(clock::id(), false), - ], - ); + let mut instruction = + Instruction::new_with_bytes(program_id, &[0], vec![AccountMeta::new(clock::id(), false)]); // Call upgrade program let result = bank_client.send_and_confirm_instruction(&mint_keypair, instruction.clone()); @@ -1831,14 +1836,8 @@ fn test_program_bpf_upgrade_and_invoke_in_same_tx() { "solana_bpf_rust_noop", ); - let invoke_instruction = Instruction::new_with_bytes( - program_id, - &[0], - vec![ - AccountMeta::new(program_id.clone(), false), - AccountMeta::new(clock::id(), false), - ], - ); + let invoke_instruction = + Instruction::new_with_bytes(program_id, &[0], vec![AccountMeta::new(clock::id(), false)]); // Call upgradeable program let result = @@ -1922,7 +1921,6 @@ fn test_program_bpf_invoke_upgradeable_via_cpi() { invoke_and_return, &[0], vec![ - AccountMeta::new_readonly(program_id, false), AccountMeta::new_readonly(program_id, false), AccountMeta::new_readonly(clock::id(), false), ], @@ -2111,7 +2109,6 @@ fn test_program_bpf_upgrade_via_cpi() { invoke_and_return, &[0], vec![ - AccountMeta::new_readonly(program_id, false), AccountMeta::new_readonly(program_id, false), AccountMeta::new_readonly(clock::id(), false), ], @@ -2215,7 +2212,6 @@ fn test_program_bpf_upgrade_self_via_cpi() { program_id, &[0], vec![ - AccountMeta::new_readonly(noop_program_id, false), AccountMeta::new_readonly(noop_program_id, false), AccountMeta::new_readonly(clock::id(), false), ], @@ -2452,43 +2448,35 @@ fn test_program_upgradeable_locks() { execute_transactions(&bank, vec![invoke_tx, upgrade_tx]) }; - if false { - println!("upgrade and invoke"); - for result in &results1 { - print_confirmed_tx("result", result.clone()); - } - println!("invoke and upgrade"); - for result in &results2 { - print_confirmed_tx("result", result.clone()); - } - } - - if let Some(ref meta) = 
results1[0].transaction.meta { - assert_eq!(meta.status, Ok(())); - } else { - panic!("no meta"); - } - if let Some(ref meta) = results1[1].transaction.meta { - assert_eq!(meta.status, Err(TransactionError::AccountInUse)); - } else { - panic!("no meta"); - } - if let Some(ref meta) = results2[0].transaction.meta { - assert_eq!( - meta.status, - Err(TransactionError::InstructionError( - 0, - InstructionError::ProgramFailedToComplete - )) - ); - } else { - panic!("no meta"); - } - if let Some(ref meta) = results2[1].transaction.meta { - assert_eq!(meta.status, Err(TransactionError::AccountInUse)); - } else { - panic!("no meta"); - } + assert!(matches!( + results1[0], + Ok(ConfirmedTransaction { + transaction: TransactionWithStatusMeta { + meta: Some(TransactionStatusMeta { status: Ok(()), .. }), + .. + }, + .. + }) + )); + assert_eq!(results1[1], Err(TransactionError::AccountInUse)); + + assert!(matches!( + results2[0], + Ok(ConfirmedTransaction { + transaction: TransactionWithStatusMeta { + meta: Some(TransactionStatusMeta { + status: Err(TransactionError::InstructionError( + 0, + InstructionError::ProgramFailedToComplete + )), + .. + }), + .. + }, + .. 
+ }) + )); + assert_eq!(results2[1], Err(TransactionError::AccountInUse)); } #[cfg(feature = "bpf_rust")] diff --git a/programs/bpf_loader/Cargo.toml b/programs/bpf_loader/Cargo.toml index ca63a56a8c4c59..fd69198c35cd46 100644 --- a/programs/bpf_loader/Cargo.toml +++ b/programs/bpf_loader/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-bpf-loader-program" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF loader" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -9,23 +9,20 @@ homepage = "https://solana.com/" documentation = "https://docs.rs/solana-bpf-loader-program" edition = "2021" -[build-dependencies] -regex = "1.5.4" - [dependencies] bincode = "1.3.3" byteorder = "1.4.3" log = "0.4.14" libsecp256k1 = "0.6.0" -solana-measure = { path = "../../measure", version = "=1.9.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.9.0" } -solana-sdk = { path = "../../sdk", version = "=1.9.0" } -solana_rbpf = "=0.2.16" +solana-measure = { path = "../../measure", version = "=1.9.4" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } +solana_rbpf = "=0.2.21" thiserror = "1.0" [dev-dependencies] rand = "0.7.3" -solana-runtime = { path = "../../runtime", version = "=1.9.0" } +solana-runtime = { path = "../../runtime", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/programs/bpf_loader/gen-syscall-list/Cargo.toml b/programs/bpf_loader/gen-syscall-list/Cargo.toml new file mode 100644 index 00000000000000..2243eece1e65ec --- /dev/null +++ b/programs/bpf_loader/gen-syscall-list/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "gen-syscall-list" +version = "1.9.4" +edition = "2021" +license = "Apache-2.0" +publish = false + +[build-dependencies] +regex = "1.5.4" diff --git a/programs/bpf_loader/build.rs b/programs/bpf_loader/gen-syscall-list/build.rs similarity index 58% rename from 
programs/bpf_loader/build.rs rename to programs/bpf_loader/gen-syscall-list/build.rs index 1ac6f087f259d2..5a22239ce016eb 100644 --- a/programs/bpf_loader/build.rs +++ b/programs/bpf_loader/gen-syscall-list/build.rs @@ -4,7 +4,6 @@ use { fs::File, io::{prelude::*, BufWriter, Read}, path::PathBuf, - process::exit, str, }, }; @@ -15,18 +14,24 @@ use { * to verify undefined symbols in a .so module that cargo-build-bpf has built. */ fn main() { - let path = PathBuf::from("src/syscalls.rs"); - let mut file = match File::open(&path) { + let syscalls_rs_path = PathBuf::from("../src/syscalls.rs"); + let syscalls_txt_path = PathBuf::from("../../../sdk/bpf/syscalls.txt"); + println!( + "cargo:warning=(not a warning) Generating {1} from {0}", + syscalls_rs_path.display(), + syscalls_txt_path.display() + ); + + let mut file = match File::open(&syscalls_rs_path) { Ok(x) => x, - _ => exit(1), + Err(err) => panic!("Failed to open {}: {}", syscalls_rs_path.display(), err), }; let mut text = vec![]; file.read_to_end(&mut text).unwrap(); let text = str::from_utf8(&text).unwrap(); - let path = PathBuf::from("../../sdk/bpf/syscalls.txt"); - let file = match File::create(&path) { + let file = match File::create(&syscalls_txt_path) { Ok(x) => x, - _ => exit(1), + Err(err) => panic!("Failed to create {}: {}", syscalls_txt_path.display(), err), }; let mut out = BufWriter::new(file); let sysc_re = Regex::new(r#"register_syscall_by_name\([[:space:]]*b"([^"]+)","#).unwrap(); diff --git a/programs/bpf_loader/gen-syscall-list/src/main.rs b/programs/bpf_loader/gen-syscall-list/src/main.rs new file mode 100644 index 00000000000000..884defbe61c211 --- /dev/null +++ b/programs/bpf_loader/gen-syscall-list/src/main.rs @@ -0,0 +1,3 @@ +fn main() { + /* I do all my work in `../build.rs` */ +} diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index b468b6a27cb4b1..7fd974d0e9eb86 100644 --- a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -38,7 +38,7 @@ 
use { clock::Clock, entrypoint::{HEAP_LENGTH, SUCCESS}, feature_set::{ - do_support_realloc, reduce_required_deploy_balance, + do_support_realloc, reduce_required_deploy_balance, reject_all_elf_rw, reject_deployment_of_unresolved_syscalls, reject_section_virtual_address_file_offset_mismatch, requestable_heap_size, start_verify_shift32_imm, stop_verify_mul64_imm_nonzero, @@ -52,7 +52,7 @@ use { rent::Rent, system_instruction::{self, MAX_PERMITTED_DATA_LENGTH}, }, - std::{cell::RefCell, fmt::Debug, rc::Rc, sync::Arc}, + std::{cell::RefCell, fmt::Debug, pin::Pin, rc::Rc, sync::Arc}, thiserror::Error, }; @@ -107,6 +107,9 @@ pub fn create_executor( verify_shift32_imm: invoke_context .feature_set .is_active(&start_verify_shift32_imm::id()), + reject_all_writable_sections: invoke_context + .feature_set + .is_active(&reject_all_elf_rw::id()), ..Config::default() }; let mut executable = { @@ -124,7 +127,8 @@ pub fn create_executor( verifier::check(text_bytes, &config) .map_err(|e| map_ebpf_error(invoke_context, EbpfError::UserError(e.into())))?; if use_jit { - if let Err(err) = executable.jit_compile() { + if let Err(err) = Executable::::jit_compile(&mut executable) + { ic_msg!(invoke_context, "Failed to compile program {:?}", err); return Err(InstructionError::ProgramFailedToCompile); } @@ -164,7 +168,7 @@ fn check_loader_id(id: &Pubkey) -> bool { /// Create the BPF virtual machine pub fn create_vm<'a, 'b>( - program: &'a Executable, + program: &'a Pin>>, parameter_bytes: &mut [u8], invoke_context: &'a mut InvokeContext<'b>, orig_data_lens: &'a [usize], @@ -955,7 +959,7 @@ impl InstructionMeter for ThisInstructionMeter { /// BPF Loader's Executor implementation pub struct BpfExecutor { - executable: Executable, + executable: Pin>>, } // Well, implement Debug for solana_rbpf::vm::Executable in solana-rbpf... 
@@ -991,7 +995,7 @@ impl Executor for BpfExecutor { serialize_time.stop(); let mut create_vm_time = Measure::start("create_vm"); let mut execute_time; - { + let execution_result = { let mut vm = match create_vm( &self.executable, parameter_bytes.as_slice_mut(), @@ -1036,12 +1040,10 @@ impl Executor for BpfExecutor { stable_log::program_return(&log_collector, &program_id, return_data); } match result { - Ok(status) => { - if status != SUCCESS { - let error: InstructionError = status.into(); - stable_log::program_failure(&log_collector, &program_id, &error); - return Err(error); - } + Ok(status) if status != SUCCESS => { + let error: InstructionError = status.into(); + stable_log::program_failure(&log_collector, &program_id, &error); + Err(error) } Err(error) => { let error = match error { @@ -1054,23 +1056,29 @@ impl Executor for BpfExecutor { } }; stable_log::program_failure(&log_collector, &program_id, &error); - return Err(error); + Err(error) } + _ => Ok(()), } - execute_time.stop(); - } + }; + execute_time.stop(); + let mut deserialize_time = Measure::start("deserialize"); - let keyed_accounts = invoke_context.get_keyed_accounts()?; - deserialize_parameters( - &loader_id, - &keyed_accounts[first_instruction_account + 1..], - parameter_bytes.as_slice(), - &account_lengths, - invoke_context - .feature_set - .is_active(&do_support_realloc::id()), - )?; + let execute_or_deserialize_result = execution_result.and_then(|_| { + let keyed_accounts = invoke_context.get_keyed_accounts()?; + deserialize_parameters( + &loader_id, + &keyed_accounts[first_instruction_account + 1..], + parameter_bytes.as_slice(), + &account_lengths, + invoke_context + .feature_set + .is_active(&do_support_realloc::id()), + ) + }); deserialize_time.stop(); + + // Update the timings let timings = &mut invoke_context.timings; timings.serialize_us = timings.serialize_us.saturating_add(serialize_time.as_us()); timings.create_vm_us = timings.create_vm_us.saturating_add(create_vm_time.as_us()); @@ 
-1078,8 +1086,11 @@ impl Executor for BpfExecutor { timings.deserialize_us = timings .deserialize_us .saturating_add(deserialize_time.as_us()); - stable_log::program_success(&log_collector, &program_id); - Ok(()) + + if execute_or_deserialize_result.is_ok() { + stable_log::program_success(&log_collector, &program_id); + } + execute_or_deserialize_result } } diff --git a/programs/bpf_loader/src/serialization.rs b/programs/bpf_loader/src/serialization.rs index cfde48561aa4ad..4ae530e21fd4c7 100644 --- a/programs/bpf_loader/src/serialization.rs +++ b/programs/bpf_loader/src/serialization.rs @@ -455,7 +455,7 @@ mod tests { &preparation.message, &preparation.message.instructions[0], &program_indices, - Some(&preparation.account_indices), + &preparation.account_indices, ) .unwrap(); diff --git a/programs/bpf_loader/src/syscalls.rs b/programs/bpf_loader/src/syscalls.rs index a9aa33f3c54236..ea4db313215205 100644 --- a/programs/bpf_loader/src/syscalls.rs +++ b/programs/bpf_loader/src/syscalls.rs @@ -24,10 +24,10 @@ use { entrypoint::{BPF_ALIGN_OF_U128, MAX_PERMITTED_DATA_INCREASE, SUCCESS}, epoch_schedule::EpochSchedule, feature_set::{ - blake3_syscall_enabled, demote_program_write_locks, disable_fees_sysvar, - do_support_realloc, libsecp256k1_0_5_upgrade_enabled, - prevent_calling_precompiles_as_programs, return_data_syscall_enabled, - secp256k1_recover_syscall_enabled, sol_log_data_syscall_enabled, + blake3_syscall_enabled, disable_fees_sysvar, do_support_realloc, + libsecp256k1_0_5_upgrade_enabled, prevent_calling_precompiles_as_programs, + return_data_syscall_enabled, secp256k1_recover_syscall_enabled, + sol_log_data_syscall_enabled, }, hash::{Hasher, HASH_BYTES}, instruction::{AccountMeta, Instruction, InstructionError}, @@ -2179,9 +2179,6 @@ fn get_translated_accounts<'a, T, F>( where F: Fn(&T, &InvokeContext) -> Result, EbpfError>, { - let demote_program_write_locks = invoke_context - .feature_set - .is_active(&demote_program_write_locks::id()); let keyed_accounts = 
invoke_context .get_instruction_keyed_accounts() .map_err(SyscallError::InstructionError)?; @@ -2209,7 +2206,7 @@ where account.set_executable(caller_account.executable); account.set_rent_epoch(caller_account.rent_epoch); } - let caller_account = if message.is_writable(i, demote_program_write_locks) { + let caller_account = if message.is_writable(i) { if let Some(orig_data_len_index) = keyed_accounts .iter() .position(|keyed_account| keyed_account.unsigned_key() == account_key) @@ -2368,12 +2365,14 @@ fn call<'a, 'b: 'a>( // Process instruction invoke_context - .process_cross_program_instruction( + .process_instruction( &message, + &message.instructions[0], &program_indices, &account_indices, &caller_write_privileges, ) + .result .map_err(SyscallError::InstructionError)?; // Copy results back to caller @@ -2967,7 +2966,7 @@ mod tests { ); let mut invoke_context = InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall_panic = SyscallPanic { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3044,7 +3043,7 @@ mod tests { ); let mut invoke_context = InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall_sol_log = SyscallLog { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3148,7 +3147,7 @@ mod tests { ); let mut invoke_context = InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let cost = invoke_context.get_compute_budget().log_64_units; let mut syscall_sol_log_u64 = SyscallLogU64 { @@ -3190,7 +3189,7 @@ mod tests { ); let mut invoke_context = InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, 
&message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let cost = invoke_context.get_compute_budget().log_pubkey_units; let mut syscall_sol_pubkey = SyscallLogPubkey { @@ -3464,7 +3463,7 @@ mod tests { * 4, ); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall = SyscallSha256 { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3563,7 +3562,7 @@ mod tests { let sysvars = [(sysvar::clock::id(), data)]; invoke_context.sysvars = &sysvars; invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall = SyscallGetClockSysvar { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3608,7 +3607,7 @@ mod tests { let sysvars = [(sysvar::epoch_schedule::id(), data)]; invoke_context.sysvars = &sysvars; invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall = SyscallGetEpochScheduleSysvar { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3660,7 +3659,7 @@ mod tests { let sysvars = [(sysvar::fees::id(), data)]; invoke_context.sysvars = &sysvars; invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall = SyscallGetFeesSysvar { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3703,7 +3702,7 @@ mod tests { let sysvars = [(sysvar::rent::id(), data)]; invoke_context.sysvars = &sysvars; invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let mut syscall = SyscallGetRentSysvar { invoke_context: Rc::new(RefCell::new(&mut invoke_context)), @@ -3841,7 +3840,7 @@ mod tests { ); let mut invoke_context = 
InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let address = bpf_loader_upgradeable::id(); @@ -3957,7 +3956,7 @@ mod tests { ); let mut invoke_context = InvokeContext::new_mock(&accounts, &[]); invoke_context - .push(&message, &message.instructions[0], &[0], None) + .push(&message, &message.instructions[0], &[0], &[]) .unwrap(); let cost = invoke_context .get_compute_budget() diff --git a/programs/compute-budget/Cargo.toml b/programs/compute-budget/Cargo.toml index 3f2a14b0468f56..4f40da3c0044cb 100644 --- a/programs/compute-budget/Cargo.toml +++ b/programs/compute-budget/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-compute-budget-program" description = "Solana Compute Budget program" -version = "1.9.0" +version = "1.9.4" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-compute-budget-program" repository = "https://github.com/solana-labs/solana" @@ -10,8 +10,8 @@ license = "Apache-2.0" edition = "2021" [dependencies] -solana-program-runtime = { path = "../../program-runtime", version = "=1.9.0" } -solana-sdk = { path = "../../sdk", version = "=1.9.0" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/programs/config/Cargo.toml b/programs/config/Cargo.toml index 547d8cc9985834..8f9c2a7b2c5920 100644 --- a/programs/config/Cargo.toml +++ b/programs/config/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-config-program" -version = "1.9.0" +version = "1.9.4" description = "Solana Config program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,11 +14,11 @@ bincode = "1.3.3" chrono = { version = "0.4.11", features = ["serde"] } serde = "1.0.130" serde_derive = "1.0.103" -solana-program-runtime = { path = "../../program-runtime", version = 
"=1.9.0" } -solana-sdk = { path = "../../sdk", version = "=1.9.0" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } [dev-dependencies] -solana-logger = { path = "../../logger", version = "=1.9.0" } +solana-logger = { path = "../../logger", version = "=1.9.4" } [lib] crate-type = ["lib"] diff --git a/programs/stake/Cargo.toml b/programs/stake/Cargo.toml index b7a691a0da4ff3..bf7b3f9b79a8bd 100644 --- a/programs/stake/Cargo.toml +++ b/programs/stake/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-stake-program" -version = "1.9.0" +version = "1.9.4" description = "Solana Stake program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,18 +16,18 @@ num-derive = "0.3" num-traits = "0.2" serde = "1.0.130" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.0" } -solana-metrics = { path = "../../metrics", version = "=1.9.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.9.0" } -solana-sdk = { path = "../../sdk", version = "=1.9.0" } -solana-vote-program = { path = "../vote", version = "=1.9.0" } -solana-config-program = { path = "../config", version = "=1.9.0" } +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.4" } +solana-metrics = { path = "../../metrics", version = "=1.9.4" } +solana-program-runtime = { path = "../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } +solana-vote-program = { path = "../vote", version = "=1.9.4" } +solana-config-program = { path = "../config", version = "=1.9.4" } thiserror = "1.0" [dev-dependencies] proptest = "1.0" -solana-logger = { path = "../../logger", version = "=1.9.0" } +solana-logger = { path = 
"../../logger", version = "=1.9.4" } [build-dependencies] rustc_version = "0.4" diff --git a/programs/stake/src/stake_instruction.rs b/programs/stake/src/stake_instruction.rs index fc2e530f32a797..e9fee49396719f 100644 --- a/programs/stake/src/stake_instruction.rs +++ b/programs/stake/src/stake_instruction.rs @@ -437,7 +437,7 @@ mod tests { &preparation.message, &preparation.message.instructions[0], &program_indices, - Some(&preparation.account_indices), + &preparation.account_indices, )?; super::process_instruction(1, &instruction.data, &mut invoke_context) } @@ -1084,7 +1084,7 @@ mod tests { &preparation.message, &preparation.message.instructions[0], &program_indices, - Some(&preparation.account_indices), + &preparation.account_indices, ) .unwrap(); assert_eq!( diff --git a/programs/vote/Cargo.toml b/programs/vote/Cargo.toml index bf4dc3f3bfc7d4..5f68a2f7289b71 100644 --- a/programs/vote/Cargo.toml +++ b/programs/vote/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-vote-program" -version = "1.9.0" +version = "1.9.4" description = "Solana Vote program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -16,12 +16,12 @@ num-derive = "0.3" num-traits = "0.2" serde = "1.0.130" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.0" } -solana-logger = { path = "../../logger", version = "=1.9.0" } -solana-metrics = { path = "../../metrics", version = "=1.9.0" } -solana-program-runtime = { path = "../../program-runtime", version = "=1.9.0" } -solana-sdk = { path = "../../sdk", version = "=1.9.0" } +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.4" } +solana-logger = { path = "../../logger", version = "=1.9.4" } +solana-metrics = { path = "../../metrics", version = "=1.9.4" } +solana-program-runtime = { path = 
"../../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../../sdk", version = "=1.9.4" } thiserror = "1.0" [build-dependencies] diff --git a/programs/vote/src/vote_instruction.rs b/programs/vote/src/vote_instruction.rs index dde8ceda05b447..f64c94d22a86d1 100644 --- a/programs/vote/src/vote_instruction.rs +++ b/programs/vote/src/vote_instruction.rs @@ -408,7 +408,15 @@ pub fn process_instruction( } VoteInstruction::Withdraw(lamports) => { let to = keyed_account_at_index(keyed_accounts, first_instruction_account + 1)?; - vote_state::withdraw(me, lamports, to, &signers) + let rent_sysvar = if invoke_context + .feature_set + .is_active(&feature_set::reject_non_rent_exempt_vote_withdraws::id()) + { + Some(invoke_context.get_sysvar(&sysvar::rent::id())?) + } else { + None + }; + vote_state::withdraw(me, lamports, to, &signers, rent_sysvar) } VoteInstruction::AuthorizeChecked(vote_authorize) => { if invoke_context @@ -500,11 +508,15 @@ mod tests { .zip(accounts.into_iter()) .map(|(meta, account)| (meta.is_signer, meta.is_writable, meta.pubkey, account)) .collect(); - solana_program_runtime::invoke_context::mock_process_instruction( + + let rent = Rent::default(); + let rent_sysvar = (sysvar::rent::id(), bincode::serialize(&rent).unwrap()); + solana_program_runtime::invoke_context::mock_process_instruction_with_sysvars( &id(), Vec::new(), &instruction.data, &keyed_accounts, + &[rent_sysvar], super::process_instruction, ) } diff --git a/programs/vote/src/vote_state/mod.rs b/programs/vote/src/vote_state/mod.rs index eb865ca3d8f92d..7e1be355c3e235 100644 --- a/programs/vote/src/vote_state/mod.rs +++ b/programs/vote/src/vote_state/mod.rs @@ -890,20 +890,28 @@ pub fn withdraw( lamports: u64, to_account: &KeyedAccount, signers: &HashSet, + rent_sysvar: Option, ) -> Result<(), InstructionError> { let vote_state: VoteState = State::::state(vote_account)?.convert_to_current(); verify_authorized_signer(&vote_state.authorized_withdrawer, signers)?; - match 
vote_account.lamports()?.cmp(&lamports) { - Ordering::Less => return Err(InstructionError::InsufficientFunds), - Ordering::Equal => { - // Deinitialize upon zero-balance - vote_account.set_state(&VoteStateVersions::new_current(VoteState::default()))?; + let remaining_balance = vote_account + .lamports()? + .checked_sub(lamports) + .ok_or(InstructionError::InsufficientFunds)?; + + if remaining_balance == 0 { + // Deinitialize upon zero-balance + vote_account.set_state(&VoteStateVersions::new_current(VoteState::default()))?; + } else if let Some(rent_sysvar) = rent_sysvar { + let min_rent_exempt_balance = rent_sysvar.minimum_balance(vote_account.data_len()?); + if remaining_balance < min_rent_exempt_balance { + return Err(InstructionError::InsufficientFunds); } - _ => (), } + vote_account .try_account_ref_mut()? .checked_sub_lamports(lamports)?; @@ -1107,6 +1115,8 @@ mod tests { } fn create_test_account() -> (Pubkey, RefCell) { + let rent = Rent::default(); + let balance = VoteState::get_rent_exempt_reserve(&rent); let vote_pubkey = solana_sdk::pubkey::new_rand(); ( vote_pubkey, @@ -1114,7 +1124,7 @@ mod tests { &vote_pubkey, &solana_sdk::pubkey::new_rand(), 0, - 100, + balance, )), ) } @@ -1875,46 +1885,129 @@ mod tests { &RefCell::new(AccountSharedData::default()), ), &signers, + None, ); assert_eq!(res, Err(InstructionError::MissingRequiredSignature)); // insufficient funds let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; + let lamports = vote_account.borrow().lamports(); let signers: HashSet = get_signers(keyed_accounts); let res = withdraw( &keyed_accounts[0], - 101, + lamports + 1, &KeyedAccount::new( &solana_sdk::pubkey::new_rand(), false, &RefCell::new(AccountSharedData::default()), ), &signers, + None, ); assert_eq!(res, Err(InstructionError::InsufficientFunds)); - // all good - let to_account = RefCell::new(AccountSharedData::default()); - let lamports = vote_account.borrow().lamports(); - let keyed_accounts = 
&[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; - let signers: HashSet = get_signers(keyed_accounts); - let pre_state: VoteStateVersions = vote_account.borrow().state().unwrap(); - let res = withdraw( - &keyed_accounts[0], - lamports, - &KeyedAccount::new(&solana_sdk::pubkey::new_rand(), false, &to_account), - &signers, - ); - assert_eq!(res, Ok(())); - assert_eq!(vote_account.borrow().lamports(), 0); - assert_eq!(to_account.borrow().lamports(), lamports); - let post_state: VoteStateVersions = vote_account.borrow().state().unwrap(); - // State has been deinitialized since balance is zero - assert!(post_state.is_uninitialized()); + // non rent exempt withdraw, before feature activation + { + let (vote_pubkey, vote_account) = create_test_account(); + let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; + let lamports = vote_account.borrow().lamports(); + let rent_sysvar = Rent::default(); + let minimum_balance = rent_sysvar + .minimum_balance(vote_account.borrow().data().len()) + .max(1); + assert!(minimum_balance <= lamports); + let signers: HashSet = get_signers(keyed_accounts); + let res = withdraw( + &keyed_accounts[0], + lamports - minimum_balance + 1, + &KeyedAccount::new( + &solana_sdk::pubkey::new_rand(), + false, + &RefCell::new(AccountSharedData::default()), + ), + &signers, + None, + ); + assert_eq!(res, Ok(())); + } - // reset balance and restore state, verify that authorized_withdrawer works - vote_account.borrow_mut().set_lamports(lamports); - vote_account.borrow_mut().set_state(&pre_state).unwrap(); + // non rent exempt withdraw, after feature activation + { + let (vote_pubkey, vote_account) = create_test_account(); + let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; + let lamports = vote_account.borrow().lamports(); + let rent_sysvar = Rent::default(); + let minimum_balance = rent_sysvar + .minimum_balance(vote_account.borrow().data().len()) + .max(1); + assert!(minimum_balance <= 
lamports); + let signers: HashSet = get_signers(keyed_accounts); + let res = withdraw( + &keyed_accounts[0], + lamports - minimum_balance + 1, + &KeyedAccount::new( + &solana_sdk::pubkey::new_rand(), + false, + &RefCell::new(AccountSharedData::default()), + ), + &signers, + Some(rent_sysvar), + ); + assert_eq!(res, Err(InstructionError::InsufficientFunds)); + } + + // partial valid withdraw, after feature activation + { + let to_account = RefCell::new(AccountSharedData::default()); + let (vote_pubkey, vote_account) = create_test_account(); + let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; + let lamports = vote_account.borrow().lamports(); + let rent_sysvar = Rent::default(); + let minimum_balance = rent_sysvar + .minimum_balance(vote_account.borrow().data().len()) + .max(1); + assert!(minimum_balance <= lamports); + let withdraw_lamports = lamports - minimum_balance; + let signers: HashSet = get_signers(keyed_accounts); + let res = withdraw( + &keyed_accounts[0], + withdraw_lamports, + &KeyedAccount::new(&solana_sdk::pubkey::new_rand(), false, &to_account), + &signers, + Some(rent_sysvar), + ); + assert_eq!(res, Ok(())); + assert_eq!( + vote_account.borrow().lamports(), + lamports - withdraw_lamports + ); + assert_eq!(to_account.borrow().lamports(), withdraw_lamports); + } + + // full withdraw, before/after activation + { + let rent_sysvar = Rent::default(); + for rent_sysvar in [None, Some(rent_sysvar)] { + let to_account = RefCell::new(AccountSharedData::default()); + let (vote_pubkey, vote_account) = create_test_account(); + let lamports = vote_account.borrow().lamports(); + let keyed_accounts = &[KeyedAccount::new(&vote_pubkey, true, &vote_account)]; + let signers: HashSet = get_signers(keyed_accounts); + let res = withdraw( + &keyed_accounts[0], + lamports, + &KeyedAccount::new(&solana_sdk::pubkey::new_rand(), false, &to_account), + &signers, + rent_sysvar, + ); + assert_eq!(res, Ok(())); + 
assert_eq!(vote_account.borrow().lamports(), 0); + assert_eq!(to_account.borrow().lamports(), lamports); + let post_state: VoteStateVersions = vote_account.borrow().state().unwrap(); + // State has been deinitialized since balance is zero + assert!(post_state.is_uninitialized()); + } + } // authorize authorized_withdrawer let authorized_withdrawer_pubkey = solana_sdk::pubkey::new_rand(); @@ -1943,6 +2036,7 @@ mod tests { lamports, withdrawer_keyed_account, &signers, + None, ); assert_eq!(res, Ok(())); assert_eq!(vote_account.borrow().lamports(), 0); diff --git a/rayon-threadlimit/Cargo.toml b/rayon-threadlimit/Cargo.toml index 7bc05cf140ed7f..6a25c1d8257adb 100644 --- a/rayon-threadlimit/Cargo.toml +++ b/rayon-threadlimit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-rayon-threadlimit" -version = "1.9.0" +version = "1.9.4" description = "solana-rayon-threadlimit" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-rayon-threadlimit" diff --git a/rbpf-cli/Cargo.toml b/rbpf-cli/Cargo.toml index 31d3484f41fdcf..45c3bd622abd5d 100644 --- a/rbpf-cli/Cargo.toml +++ b/rbpf-cli/Cargo.toml @@ -1,20 +1,21 @@ [package] name = "rbpf-cli" -version = "1.9.0" +version = "1.9.4" description = "CLI to test and analyze eBPF programs" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/rbpf" homepage = "https://solana.com/" keywords = ["BPF", "eBPF", "interpreter", "JIT"] edition = "2021" +publish = false [dependencies] clap = "3.0.0-beta.2" serde = "1.0.130" serde_json = "1.0.72" -solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana_rbpf = "=0.2.16" +solana-bpf-loader-program = { path = "../programs/bpf_loader", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } 
+solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana_rbpf = "=0.2.21" time = "0.3.5" diff --git a/rbpf-cli/src/main.rs b/rbpf-cli/src/main.rs index 92a59c70a62b4f..ee40c6e5b2a4a6 100644 --- a/rbpf-cli/src/main.rs +++ b/rbpf-cli/src/main.rs @@ -213,7 +213,7 @@ native machine code before execting it in the virtual machine.", &preparation.message, &preparation.message.instructions[0], &program_indices, - Some(&preparation.account_indices), + &preparation.account_indices, ) .unwrap(); let keyed_accounts = invoke_context.get_keyed_accounts().unwrap(); @@ -257,7 +257,7 @@ native machine code before execting it in the virtual machine.", let text_bytes = executable.get_text_bytes().1; check(text_bytes, &config).unwrap(); } - executable.jit_compile().unwrap(); + Executable::::jit_compile(&mut executable).unwrap(); let analysis = Analysis::from_executable(&executable); match matches.value_of("use") { diff --git a/remote-wallet/Cargo.toml b/remote-wallet/Cargo.toml index aed3fbfd03fef0..d66c915a7113ee 100644 --- a/remote-wallet/Cargo.toml +++ b/remote-wallet/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-remote-wallet" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -20,7 +20,7 @@ num-traits = { version = "0.2" } parking_lot = "0.11" qstring = "0.7.2" semver = "1.0" -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } thiserror = "1.0" uriparse = "0.6.3" diff --git a/replica-lib/Cargo.toml b/replica-lib/Cargo.toml index f2ef2dbe9116c3..031144a3eb68a8 100644 --- a/replica-lib/Cargo.toml +++ b/replica-lib/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-replica-lib" description = "The library used for 
replication by both the client and server" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,9 +14,9 @@ crossbeam-channel = "0.5" futures-util = "0.3" log = "0.4.11" prost = "0.9.0" -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } tokio = { version = "1", features = ["full"] } tonic = { version = "0.6.1", features = ["tls", "transport"] } diff --git a/replica-node/Cargo.toml b/replica-node/Cargo.toml index 291a00b736de92..11ba4788f7f1c8 100644 --- a/replica-node/Cargo.toml +++ b/replica-node/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-replica-node" description = "Solana replication node" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,26 +14,26 @@ clap = "2.33.1" crossbeam-channel = "0.5" log = "0.4.14" rand = "0.7.0" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-download-utils = { path = "../download-utils", version = "=1.9.0" } -solana-genesis-utils = { path = "../genesis-utils", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } 
-solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-validator = { path = "../validator", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-download-utils = { path = "../download-utils", version = "=1.9.4" } +solana-genesis-utils = { path = "../genesis-utils", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-replica-lib = { path = "../replica-lib", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-validator = { path = "../validator", version = "=1.9.4" } [dev-dependencies] serial_test = "0.5.1" -solana-core = { path = "../core", version = "=1.9.0" } -solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" } tempfile = "3.2.0" diff --git a/replica-node/src/replica_node.rs b/replica-node/src/replica_node.rs index 351cdfdaba0b91..021783a2f8b3a3 100644 --- a/replica-node/src/replica_node.rs +++ b/replica-node/src/replica_node.rs @@ -191,6 +191,8 @@ fn start_client_rpc_services( let subscriptions = Arc::new(RpcSubscriptions::new( &exit, + max_complete_transaction_status_slot.clone(), + blockstore.clone(), 
bank_forks.clone(), block_commitment_cache.clone(), optimistically_confirmed_bank.clone(), diff --git a/rpc-test/Cargo.toml b/rpc-test/Cargo.toml index 7955dc7daf8a5b..d17826613fe9f1 100644 --- a/rpc-test/Cargo.toml +++ b/rpc-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-rpc-test" -version = "1.9.0" +version = "1.9.4" description = "Solana RPC Test" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -18,17 +18,17 @@ log = "0.4.11" reqwest = { version = "0.11.5", default-features = false, features = ["blocking", "rustls-tls", "json"] } serde = "1.0.130" serde_json = "1.0.72" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-test-validator = { path = "../test-validator", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-test-validator = { path = "../test-validator", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } tokio = { version = "1", features = ["full"] } [dev-dependencies] -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 26d2e9ab580410..28f10a9f7dcb93 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-rpc" 
-version = "1.9.0" +version = "1.9.4" description = "Solana RPC" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -30,25 +30,25 @@ serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" soketto = "0.7" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-poh = { path = "../poh", version = "=1.9.0" } -solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", 
version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-poh = { path = "../poh", version = "=1.9.4" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-storage-bigtable = { path = "../storage-bigtable", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } spl-token = { version = "=3.2.0", features = ["no-entrypoint"] } stream-cancel = "0.8.1" thiserror = "1.0" @@ -57,8 +57,8 @@ tokio-util = { version = "0.6", features = ["codec", "compat"] } [dev-dependencies] serial_test = "0.5.1" -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } symlink = "0.1.0" [lib] diff --git a/rpc/src/cluster_tpu_info.rs b/rpc/src/cluster_tpu_info.rs index cf567bd5a482f1..5421bc7b2b9b3d 100644 --- a/rpc/src/cluster_tpu_info.rs +++ b/rpc/src/cluster_tpu_info.rs @@ -91,11 +91,7 @@ mod test { &validator_vote_keypairs1, &validator_vote_keypairs2, ]; - let GenesisConfigInfo { - genesis_config, - mint_keypair: _, - voting_keypair: _, - } = create_genesis_config_with_vote_accounts( + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, vec![10_000; 3], diff --git a/rpc/src/optimistically_confirmed_bank_tracker.rs b/rpc/src/optimistically_confirmed_bank_tracker.rs index d730fb4891d3b4..95f2f863d92f25 100644 --- a/rpc/src/optimistically_confirmed_bank_tracker.rs +++ b/rpc/src/optimistically_confirmed_bank_tracker.rs @@ -321,6 +321,7 @@ mod tests { accounts_background_service::AbsRequestSender, commitment::BlockCommitmentCache, }, solana_sdk::pubkey::Pubkey, + std::sync::atomic::AtomicU64, }; #[test] @@ -343,8 +344,10 @@ mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let block_commitment_cache = Arc::new(RwLock::new(BlockCommitmentCache::default())); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache, optimistically_confirmed_bank.clone(), diff --git a/rpc/src/rpc.rs b/rpc/src/rpc.rs index e73e9d2b18b87a..a4327a2a670438 100644 --- a/rpc/src/rpc.rs +++ b/rpc/src/rpc.rs @@ -31,7 +31,9 @@ use { solana_faucet::faucet::request_airdrop_transaction, solana_gossip::{cluster_info::ClusterInfo, contact_info::ContactInfo}, solana_ledger::{ - blockstore::Blockstore, blockstore_db::BlockstoreError, get_tmp_ledger_path, + blockstore::{Blockstore, SignatureInfosForAddress}, + blockstore_db::BlockstoreError, + get_tmp_ledger_path, leader_schedule_cache::LeaderScheduleCache, }, solana_metrics::inc_new_counter_info, @@ -71,10 +73,12 @@ use { send_transaction_service::{SendTransactionService, TransactionInfo}, tpu_info::NullTpuInfo, }, + solana_storage_bigtable::Error as StorageError, solana_streamer::socket::SocketAddrSpace, solana_transaction_status::{ - ConfirmedBlock, EncodedConfirmedTransaction, Reward, RewardType, - TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, UiTransactionEncoding, + 
ConfirmedBlock, ConfirmedTransactionStatusWithSignature, EncodedConfirmedTransaction, + Reward, RewardType, TransactionConfirmationStatus, TransactionStatus, UiConfirmedBlock, + UiTransactionEncoding, }, solana_vote_program::vote_state::{VoteState, MAX_LOCKOUT_HISTORY}, spl_token::{ @@ -1439,7 +1443,7 @@ impl JsonRpcRequestProcessor { pub async fn get_signatures_for_address( &self, address: Pubkey, - mut before: Option, + before: Option, until: Option, mut limit: usize, commitment: Option, @@ -1460,29 +1464,86 @@ impl JsonRpcRequestProcessor { highest_confirmed_root }; - let mut results = self + let SignatureInfosForAddress { + infos: mut results, + found_before, + } = self .blockstore .get_confirmed_signatures_for_address2(address, highest_slot, before, until, limit) .map_err(|err| Error::invalid_params(format!("{}", err)))?; + let map_results = |results: Vec| { + results + .into_iter() + .map(|x| { + let mut item: RpcConfirmedTransactionStatusWithSignature = x.into(); + if item.slot <= highest_confirmed_root { + item.confirmation_status = + Some(TransactionConfirmationStatus::Finalized); + } else { + item.confirmation_status = + Some(TransactionConfirmationStatus::Confirmed); + if item.block_time.is_none() { + let r_bank_forks = self.bank_forks.read().unwrap(); + item.block_time = r_bank_forks + .get(item.slot) + .map(|bank| bank.clock().unix_timestamp); + } + } + item + }) + .collect() + }; + if results.len() < limit { if let Some(bigtable_ledger_storage) = &self.bigtable_ledger_storage { + let mut bigtable_before = before; if !results.is_empty() { limit -= results.len(); - before = results.last().map(|x| x.signature); + bigtable_before = results.last().map(|x| x.signature); + } + + // If the oldest address-signature found in Blockstore has not yet been + // uploaded to long-term storage, modify the storage query to return all latest + // signatures to prevent erroring on RowNotFound. This can race with upload. 
+ if found_before && bigtable_before.is_some() { + match bigtable_ledger_storage + .get_signature_status(&bigtable_before.unwrap()) + .await + { + Err(StorageError::SignatureNotFound) => { + bigtable_before = None; + } + Err(err) => { + warn!("{:?}", err); + return Ok(map_results(results)); + } + Ok(_) => {} + } } let bigtable_results = bigtable_ledger_storage .get_confirmed_signatures_for_address( &address, - before.as_ref(), + bigtable_before.as_ref(), until.as_ref(), limit, ) .await; match bigtable_results { Ok(bigtable_results) => { - results.extend(bigtable_results.into_iter().map(|x| x.0)); + let results_set: HashSet<_> = + results.iter().map(|result| result.signature).collect(); + for (bigtable_result, _) in bigtable_results { + // In the upload race condition, latest address-signatures in + // long-term storage may include original `before` signature... + if before != Some(bigtable_result.signature) + // ...or earlier Blockstore signatures + && !results_set.contains(&bigtable_result.signature) + { + results.push(bigtable_result); + } + } } Err(err) => { warn!("{:?}", err); @@ -1491,24 +1552,7 @@ impl JsonRpcRequestProcessor { } } - Ok(results - .into_iter() - .map(|x| { - let mut item: RpcConfirmedTransactionStatusWithSignature = x.into(); - if item.slot <= highest_confirmed_root { - item.confirmation_status = Some(TransactionConfirmationStatus::Finalized); - } else { - item.confirmation_status = Some(TransactionConfirmationStatus::Confirmed); - if item.block_time.is_none() { - let r_bank_forks = self.bank_forks.read().unwrap(); - item.block_time = r_bank_forks - .get(item.slot) - .map(|bank| bank.clock().unix_timestamp); - } - } - item - }) - .collect()) + Ok(map_results(results)) } else { Err(RpcCustomError::TransactionHistoryNotAvailable.into()) } @@ -2105,7 +2149,7 @@ fn verify_and_parse_signatures_for_address_params( Ok((address, before, until, limit)) } -fn check_is_at_least_confirmed(commitment: CommitmentConfig) -> Result<()> { +pub(crate) fn 
check_is_at_least_confirmed(commitment: CommitmentConfig) -> Result<()> { if !commitment.is_at_least_confirmed() { return Err(Error::invalid_params( "Method does not support commitment below `confirmed`", @@ -3520,6 +3564,7 @@ pub mod rpc_full { if config.sig_verify { verify_transaction(&transaction, &bank.feature_set)?; } + let number_of_accounts = transaction.message().account_keys_len(); let TransactionSimulationResult { result, @@ -3539,28 +3584,36 @@ pub mod rpc_full { return Err(Error::invalid_params("base58 encoding not supported")); } - if config_accounts.addresses.len() > post_simulation_accounts.len() { + if config_accounts.addresses.len() > number_of_accounts { return Err(Error::invalid_params(format!( "Too many accounts provided; max {}", - post_simulation_accounts.len() + number_of_accounts ))); } - let mut accounts = vec![]; - for address_str in config_accounts.addresses { - let address = verify_pubkey(&address_str)?; - accounts.push(if result.is_err() { - None - } else { - post_simulation_accounts - .iter() - .find(|(key, _account)| key == &address) - .map(|(pubkey, account)| { - UiAccount::encode(pubkey, account, accounts_encoding, None, None) - }) - }); + if result.is_err() { + Some(vec![None; config_accounts.addresses.len()]) + } else { + let mut accounts = vec![]; + for address_str in config_accounts.addresses { + let address = verify_pubkey(&address_str)?; + accounts.push( + post_simulation_accounts + .iter() + .find(|(key, _account)| key == &address) + .map(|(pubkey, account)| { + UiAccount::encode( + pubkey, + account, + accounts_encoding, + None, + None, + ) + }), + ); + } + Some(accounts) } - Some(accounts) } else { None }; @@ -6270,6 +6323,7 @@ pub mod tests { mut genesis_config, mint_keypair, voting_keypair, + .. 
} = create_genesis_config(TEST_MINT_LAMPORTS); genesis_config.rent.lamports_per_byte_year = 50; @@ -7802,9 +7856,10 @@ pub mod tests { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache.clone(), optimistically_confirmed_bank.clone(), diff --git a/rpc/src/rpc_pubsub.rs b/rpc/src/rpc_pubsub.rs index 4075c6aacd9955..7b9053aeccca89 100644 --- a/rpc/src/rpc_pubsub.rs +++ b/rpc/src/rpc_pubsub.rs @@ -2,11 +2,13 @@ use { crate::{ + rpc::check_is_at_least_confirmed, rpc_pubsub_service::PubSubConfig, rpc_subscription_tracker::{ - AccountSubscriptionParams, LogsSubscriptionKind, LogsSubscriptionParams, - ProgramSubscriptionParams, SignatureSubscriptionParams, SubscriptionControl, - SubscriptionId, SubscriptionParams, SubscriptionToken, + AccountSubscriptionParams, BlockSubscriptionKind, BlockSubscriptionParams, + LogsSubscriptionKind, LogsSubscriptionParams, ProgramSubscriptionParams, + SignatureSubscriptionParams, SubscriptionControl, SubscriptionId, SubscriptionParams, + SubscriptionToken, }, }, dashmap::DashMap, @@ -16,15 +18,17 @@ use { solana_account_decoder::{UiAccount, UiAccountEncoding}, solana_client::{ rpc_config::{ - RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, - RpcTransactionLogsConfig, RpcTransactionLogsFilter, + RpcAccountInfoConfig, RpcBlockSubscribeConfig, RpcBlockSubscribeFilter, + RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, RpcTransactionLogsConfig, + RpcTransactionLogsFilter, }, rpc_response::{ - Response as RpcResponse, RpcKeyedAccount, RpcLogsResponse, RpcSignatureResult, RpcVote, - SlotInfo, SlotUpdate, + Response as RpcResponse, RpcBlockUpdate, RpcKeyedAccount, 
RpcLogsResponse, + RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, }, }, solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, + solana_transaction_status::UiTransactionEncoding, std::{str::FromStr, sync::Arc}, }; @@ -187,6 +191,28 @@ pub trait RpcSolPubSub { id: PubSubSubscriptionId, ) -> Result; + // Subscribe to block data and content + #[pubsub(subscription = "blockNotification", subscribe, name = "blockSubscribe")] + fn block_subscribe( + &self, + meta: Self::Metadata, + subscriber: Subscriber>, + filter: RpcBlockSubscribeFilter, + config: Option, + ); + + // Unsubscribe from block notification subscription. + #[pubsub( + subscription = "blockNotification", + unsubscribe, + name = "blockUnsubscribe" + )] + fn block_unsubscribe( + &self, + meta: Option, + id: PubSubSubscriptionId, + ) -> Result; + // Get notification when vote is encountered #[pubsub(subscription = "voteNotification", subscribe, name = "voteSubscribe")] fn vote_subscribe(&self, meta: Self::Metadata, subscriber: Subscriber); @@ -295,6 +321,18 @@ mod internal { #[rpc(name = "slotsUpdatesUnsubscribe")] fn slots_updates_unsubscribe(&self, id: SubscriptionId) -> Result; + // Subscribe to block data and content + #[rpc(name = "blockSubscribe")] + fn block_subscribe( + &self, + filter: RpcBlockSubscribeFilter, + config: Option, + ) -> Result; + + // Unsubscribe from block notification subscription. 
+ #[rpc(name = "blockUnsubscribe")] + fn block_unsubscribe(&self, id: SubscriptionId) -> Result; + // Get notification when vote is encountered #[rpc(name = "voteSubscribe")] fn vote_subscribe(&self) -> Result; @@ -475,6 +513,42 @@ impl RpcSolPubSubInternal for RpcSolPubSubImpl { self.unsubscribe(id) } + fn block_subscribe( + &self, + filter: RpcBlockSubscribeFilter, + config: Option, + ) -> Result { + if !self.config.enable_block_subscription { + return Err(Error::new(jsonrpc_core::ErrorCode::MethodNotFound)); + } + let config = config.unwrap_or_default(); + let commitment = config.commitment.unwrap_or_default(); + check_is_at_least_confirmed(commitment)?; + let params = BlockSubscriptionParams { + commitment: config.commitment.unwrap_or_default(), + encoding: config.encoding.unwrap_or(UiTransactionEncoding::Base64), + kind: match filter { + RpcBlockSubscribeFilter::All => BlockSubscriptionKind::All, + RpcBlockSubscribeFilter::MentionsAccountOrProgram(key) => { + BlockSubscriptionKind::MentionsAccountOrProgram(param::( + &key, + "mentions_account_or_program", + )?) 
+ } + }, + transaction_details: config.transaction_details.unwrap_or_default(), + show_rewards: config.show_rewards.unwrap_or_default(), + }; + self.subscribe(SubscriptionParams::Block(params)) + } + + fn block_unsubscribe(&self, id: SubscriptionId) -> Result { + if !self.config.enable_block_subscription { + return Err(Error::new(jsonrpc_core::ErrorCode::MethodNotFound)); + } + self.unsubscribe(id) + } + fn vote_subscribe(&self) -> Result { if !self.config.enable_vote_subscription { return Err(Error::new(jsonrpc_core::ErrorCode::MethodNotFound)); @@ -539,7 +613,10 @@ mod tests { solana_stake_program::stake_state, solana_vote_program::vote_state::Vote, std::{ - sync::{atomic::AtomicBool, RwLock}, + sync::{ + atomic::{AtomicBool, AtomicU64}, + RwLock, + }, thread::sleep, time::Duration, }, @@ -578,8 +655,10 @@ mod tests { let bank = Bank::new_for_tests(&genesis_config); let blockhash = bank.last_blockhash(); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &Arc::new(AtomicBool::new(false)), + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -705,7 +784,11 @@ mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let mut io = IoHandler::<()>::default(); - let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); let (rpc, _receiver) = rpc_pubsub_service::test_connection(&subscriptions); io.extend_with(rpc.to_delegate()); @@ -756,9 +839,10 @@ mod tests { let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let 
bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &Arc::new(AtomicBool::new(false)), + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -873,9 +957,10 @@ mod tests { let bank0 = bank_forks.read().unwrap().get(0).unwrap().clone(); let bank1 = Bank::new_from_parent(&bank0, &Pubkey::default(), 1); bank_forks.write().unwrap().insert(bank1); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &Arc::new(AtomicBool::new(false)), + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -963,7 +1048,11 @@ mod tests { )))); let mut io = IoHandler::<()>::default(); - let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); let (rpc, _receiver) = rpc_pubsub_service::test_connection(&subscriptions); io.extend_with(rpc.to_delegate()); @@ -1007,8 +1096,10 @@ mod tests { let bob = Keypair::new(); let exit = Arc::new(AtomicBool::new(false)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let rpc_subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -1058,9 +1149,10 @@ mod tests { let exit = Arc::new(AtomicBool::new(false)); let block_commitment_cache = 
Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), block_commitment_cache, OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks), @@ -1128,7 +1220,11 @@ mod tests { let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); let (rpc, mut receiver) = rpc_pubsub_service::test_connection(&rpc_subscriptions); rpc.slot_subscribe().unwrap(); @@ -1156,7 +1252,11 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); let (rpc, mut receiver) = rpc_pubsub_service::test_connection(&rpc_subscriptions); let sub_id = rpc.slot_subscribe().unwrap(); @@ -1198,8 +1298,10 @@ mod tests { // Setup Subscriptions let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, block_commitment_cache, optimistically_confirmed_bank, @@ -1228,7 +1330,11 @@ mod tests { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let rpc_subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); let (rpc, _receiver) = rpc_pubsub_service::test_connection(&rpc_subscriptions); let sub_id = rpc.vote_subscribe().unwrap(); diff --git a/rpc/src/rpc_pubsub_service.rs b/rpc/src/rpc_pubsub_service.rs index a8f6c3745782d7..78e7a113f34778 100644 --- a/rpc/src/rpc_pubsub_service.rs +++ b/rpc/src/rpc_pubsub_service.rs @@ -33,6 +33,7 @@ pub const DEFAULT_WORKER_THREADS: usize = 1; #[derive(Debug, Clone)] pub struct PubSubConfig { + pub enable_block_subscription: bool, pub enable_vote_subscription: bool, pub max_active_subscriptions: usize, pub queue_capacity_items: usize, @@ -44,6 +45,7 @@ pub struct PubSubConfig { impl Default for PubSubConfig { fn default() -> Self { Self { + enable_block_subscription: false, enable_vote_subscription: false, max_active_subscriptions: MAX_ACTIVE_SUBSCRIPTIONS, queue_capacity_items: DEFAULT_QUEUE_CAPACITY_ITEMS, @@ -57,6 +59,7 @@ impl Default for PubSubConfig { impl PubSubConfig { pub fn default_for_tests() -> Self { Self { + enable_block_subscription: false, enable_vote_subscription: false, max_active_subscriptions: MAX_ACTIVE_SUBSCRIPTIONS, queue_capacity_items: DEFAULT_TEST_QUEUE_CAPACITY_ITEMS, @@ -142,6 +145,9 @@ fn count_final(params: &SubscriptionParams) { SubscriptionParams::Vote => { inc_new_counter_info!("rpc-pubsub-final-votes", 1); } + SubscriptionParams::Block(_) => { + inc_new_counter_info!("rpc-pubsub-final-slot-txs", 1); + } } } @@ -187,16 +193,17 @@ pub struct TestBroadcastReceiver { #[cfg(test)] impl TestBroadcastReceiver { pub fn recv(&mut self) -> String { - use { - std::{ - 
thread::sleep, - time::{Duration, Instant}, - }, - tokio::sync::broadcast::error::TryRecvError, + return match self.recv_timeout(std::time::Duration::from_secs(5)) { + Err(err) => panic!("broadcast receiver error: {}", err), + Ok(str) => str, }; + } - let timeout = Duration::from_secs(5); - let started = Instant::now(); + pub fn recv_timeout(&mut self, timeout: std::time::Duration) -> Result { + use std::thread::sleep; + use tokio::sync::broadcast::error::TryRecvError; + + let started = std::time::Instant::now(); loop { match self.inner.try_recv() { @@ -206,17 +213,16 @@ impl TestBroadcastReceiver { started.elapsed().as_millis() ); if let Some(json) = self.handler.handle(notification).expect("handler failed") { - return json.to_string(); + return Ok(json.to_string()); } } Err(TryRecvError::Empty) => { - assert!( - started.elapsed() <= timeout, - "TestBroadcastReceiver: no data, timeout reached" - ); - sleep(Duration::from_millis(50)); + if started.elapsed() > timeout { + return Err("TestBroadcastReceiver: no data, timeout reached".into()); + } + sleep(std::time::Duration::from_millis(50)); } - Err(err) => panic!("broadcast receiver error: {}", err), + Err(e) => return Err(e.to_string()), } } } @@ -230,6 +236,7 @@ pub fn test_connection( let rpc_impl = RpcSolPubSubImpl::new( PubSubConfig { + enable_block_subscription: true, enable_vote_subscription: true, queue_capacity_items: 100, ..PubSubConfig::default() @@ -383,7 +390,10 @@ mod tests { }, std::{ net::{IpAddr, Ipv4Addr}, - sync::{atomic::AtomicBool, RwLock}, + sync::{ + atomic::{AtomicBool, AtomicU64}, + RwLock, + }, }, }; @@ -391,6 +401,7 @@ mod tests { fn test_pubsub_new() { let pubsub_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 0); let exit = Arc::new(AtomicBool::new(false)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); let bank = Bank::new_for_tests(&genesis_config); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); @@ -398,6 +409,7 @@ mod tests { OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, diff --git a/rpc/src/rpc_subscription_tracker.rs b/rpc/src/rpc_subscription_tracker.rs index 36ef4d5e99710b..c49ff6fd942a24 100644 --- a/rpc/src/rpc_subscription_tracker.rs +++ b/rpc/src/rpc_subscription_tracker.rs @@ -11,6 +11,7 @@ use { solana_sdk::{ clock::Slot, commitment_config::CommitmentConfig, pubkey::Pubkey, signature::Signature, }, + solana_transaction_status::{TransactionDetails, UiTransactionEncoding}, std::{ collections::{ hash_map::{Entry, HashMap}, @@ -44,6 +45,7 @@ impl From for u64 { #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum SubscriptionParams { Account(AccountSubscriptionParams), + Block(BlockSubscriptionParams), Logs(LogsSubscriptionParams), Program(ProgramSubscriptionParams), Signature(SignatureSubscriptionParams), @@ -62,6 +64,7 @@ impl SubscriptionParams { SubscriptionParams::Signature(_) => "signatureNotification", SubscriptionParams::Slot => "slotNotification", SubscriptionParams::SlotsUpdates => "slotsUpdatesNotification", + SubscriptionParams::Block(_) => "blockNotification", SubscriptionParams::Root => "rootNotification", SubscriptionParams::Vote => "voteNotification", } @@ -73,6 +76,7 @@ impl SubscriptionParams { SubscriptionParams::Logs(params) => Some(params.commitment), SubscriptionParams::Program(params) => Some(params.commitment), SubscriptionParams::Signature(params) => Some(params.commitment), + SubscriptionParams::Block(params) => Some(params.commitment), SubscriptionParams::Slot | SubscriptionParams::SlotsUpdates | SubscriptionParams::Root @@ -83,12 +87,13 @@ 
impl SubscriptionParams { fn is_commitment_watcher(&self) -> bool { let commitment = match self { SubscriptionParams::Account(params) => ¶ms.commitment, + SubscriptionParams::Block(params) => ¶ms.commitment, SubscriptionParams::Logs(params) => ¶ms.commitment, SubscriptionParams::Program(params) => ¶ms.commitment, SubscriptionParams::Signature(params) => ¶ms.commitment, - SubscriptionParams::Slot + SubscriptionParams::Root + | SubscriptionParams::Slot | SubscriptionParams::SlotsUpdates - | SubscriptionParams::Root | SubscriptionParams::Vote => return false, }; !commitment.is_confirmed() @@ -97,12 +102,13 @@ impl SubscriptionParams { fn is_gossip_watcher(&self) -> bool { let commitment = match self { SubscriptionParams::Account(params) => ¶ms.commitment, + SubscriptionParams::Block(params) => ¶ms.commitment, SubscriptionParams::Logs(params) => ¶ms.commitment, SubscriptionParams::Program(params) => ¶ms.commitment, SubscriptionParams::Signature(params) => ¶ms.commitment, - SubscriptionParams::Slot + SubscriptionParams::Root + | SubscriptionParams::Slot | SubscriptionParams::SlotsUpdates - | SubscriptionParams::Root | SubscriptionParams::Vote => return false, }; commitment.is_confirmed() @@ -127,6 +133,21 @@ pub struct AccountSubscriptionParams { pub commitment: CommitmentConfig, } +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct BlockSubscriptionParams { + pub commitment: CommitmentConfig, + pub encoding: UiTransactionEncoding, + pub kind: BlockSubscriptionKind, + pub transaction_details: TransactionDetails, + pub show_rewards: bool, +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum BlockSubscriptionKind { + All, + MentionsAccountOrProgram(Pubkey), +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct LogsSubscriptionParams { pub kind: LogsSubscriptionKind, @@ -159,9 +180,10 @@ pub struct SignatureSubscriptionParams { #[derive(Clone)] pub struct SubscriptionControl(Arc); +pub struct WeakSubscriptionTokenRef(Weak, SubscriptionId); struct 
SubscriptionControlInner { - subscriptions: DashMap>, + subscriptions: DashMap, next_id: AtomicU64, max_active_subscriptions: usize, sender: crossbeam_channel::Sender, @@ -195,33 +217,44 @@ impl SubscriptionControl { self.0.subscriptions.len() ); let count = self.0.subscriptions.len(); - match self.0.subscriptions.entry(params) { - DashEntry::Occupied(entry) => Ok(SubscriptionToken( - entry - .get() - .upgrade() - .expect("dead subscription encountered in SubscriptionControl"), + let create_token_and_weak_ref = |id, params| { + let token = SubscriptionToken( + Arc::new(SubscriptionTokenInner { + control: Arc::clone(&self.0), + params, + id, + }), self.0.counter.create_token(), - )), + ); + let weak_ref = WeakSubscriptionTokenRef(Arc::downgrade(&token.0), token.0.id); + (token, weak_ref) + }; + + match self.0.subscriptions.entry(params) { + DashEntry::Occupied(mut entry) => match entry.get().0.upgrade() { + Some(token_ref) => Ok(SubscriptionToken(token_ref, self.0.counter.create_token())), + // This means the last Arc for this Weak pointer entered the drop just before us, + // but could not remove the entry since we are holding the write lock. + // See `Drop` implementation for `SubscriptionTokenInner` for further info. 
+ None => { + let (token, weak_ref) = + create_token_and_weak_ref(entry.get().1, entry.key().clone()); + entry.insert(weak_ref); + Ok(token) + } + }, DashEntry::Vacant(entry) => { if count >= self.0.max_active_subscriptions { inc_new_counter_info!("rpc-subscription-refused-limit-reached", 1); return Err(Error::TooManySubscriptions); } let id = SubscriptionId::from(self.0.next_id.fetch_add(1, Ordering::AcqRel)); - let token = SubscriptionToken( - Arc::new(SubscriptionTokenInner { - control: Arc::clone(&self.0), - params: entry.key().clone(), - id, - }), - self.0.counter.create_token(), - ); + let (token, weak_ref) = create_token_and_weak_ref(id, entry.key().clone()); let _ = self .0 .sender .send(NotificationEntry::Subscribed(token.0.params.clone(), id).into()); - entry.insert(Arc::downgrade(&token.0)); + entry.insert(weak_ref); datapoint_info!( "rpc-subscription", ("total", self.0.subscriptions.len(), i64) @@ -473,12 +506,15 @@ impl SubscriptionsTracker { ) -> &HashMap>> { &self.by_signature } + pub fn commitment_watchers(&self) -> &HashMap> { &self.commitment_watchers } + pub fn gossip_watchers(&self) -> &HashMap> { &self.gossip_watchers } + pub fn node_progress_watchers(&self) -> &HashMap> { &self.node_progress_watchers } @@ -505,7 +541,9 @@ impl Drop for SubscriptionTokenInner { DashEntry::Vacant(_) => { warn!("Subscriptions inconsistency (missing entry in by_params)"); } - DashEntry::Occupied(entry) => { + // Check the strong refs count to ensure no other thread recreated this subscription (not token) + // while we were acquiring the lock. + DashEntry::Occupied(entry) if entry.get().0.strong_count() == 0 => { let _ = self .control .sender @@ -516,6 +554,9 @@ impl Drop for SubscriptionTokenInner { ("total", self.control.subscriptions.len(), i64) ); } + // This branch handles the case in which this entry got recreated + // while we were waiting for the lock (inside the `DashMap::entry` method). 
+ DashEntry::Occupied(_entry) /* if _entry.get().0.strong_count() > 0 */ => (), } } } diff --git a/rpc/src/rpc_subscriptions.rs b/rpc/src/rpc_subscriptions.rs index 35256667815891..f2a8597845ffff 100644 --- a/rpc/src/rpc_subscriptions.rs +++ b/rpc/src/rpc_subscriptions.rs @@ -1,14 +1,14 @@ //! The `pubsub` module implements a threaded subscription service on client RPC request - use { crate::{ optimistically_confirmed_bank_tracker::OptimisticallyConfirmedBank, parsed_token_accounts::{get_parsed_token_account, get_parsed_token_accounts}, rpc_pubsub_service::PubSubConfig, rpc_subscription_tracker::{ - AccountSubscriptionParams, LogsSubscriptionKind, LogsSubscriptionParams, - ProgramSubscriptionParams, SignatureSubscriptionParams, SubscriptionControl, - SubscriptionId, SubscriptionInfo, SubscriptionParams, SubscriptionsTracker, + AccountSubscriptionParams, BlockSubscriptionKind, BlockSubscriptionParams, + LogsSubscriptionKind, LogsSubscriptionParams, ProgramSubscriptionParams, + SignatureSubscriptionParams, SubscriptionControl, SubscriptionId, SubscriptionInfo, + SubscriptionParams, SubscriptionsTracker, }, }, crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender}, @@ -18,10 +18,12 @@ use { solana_client::{ rpc_filter::RpcFilterType, rpc_response::{ - ProcessedSignatureResult, ReceivedSignatureResult, Response, RpcKeyedAccount, - RpcLogsResponse, RpcResponseContext, RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, + ProcessedSignatureResult, ReceivedSignatureResult, Response, RpcBlockUpdate, + RpcBlockUpdateError, RpcKeyedAccount, RpcLogsResponse, RpcResponseContext, + RpcSignatureResult, RpcVote, SlotInfo, SlotUpdate, }, }, + solana_ledger::{blockstore::Blockstore, get_tmp_ledger_path}, solana_measure::measure::Measure, solana_rayon_threadlimit::get_thread_count, solana_runtime::{ @@ -37,6 +39,7 @@ use { timing::timestamp, transaction, }, + solana_transaction_status::ConfirmedBlock, solana_vote_program::vote_state::Vote, std::{ cell::RefCell, @@ -44,7 
+47,7 @@ use { io::Cursor, iter, str, sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, + atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering}, Arc, Mutex, RwLock, Weak, }, thread::{Builder, JoinHandle}, @@ -130,7 +133,7 @@ fn check_commitment_and_notify( params: &P, subscription: &SubscriptionInfo, bank_forks: &Arc>, - commitment_slots: &CommitmentSlots, + slot: Slot, bank_method: B, filter_results: F, notifier: &RpcNotifier, @@ -142,20 +145,6 @@ where F: Fn(X, &P, Slot, Arc) -> (Box>, Slot), X: Clone + Default, { - let commitment = if let Some(commitment) = subscription.commitment() { - commitment - } else { - error!("missing commitment in check_commitment_and_notify"); - return false; - }; - let slot = if commitment.is_finalized() { - commitment_slots.highest_confirmed_root - } else if commitment.is_confirmed() { - commitment_slots.highest_confirmed_slot - } else { - commitment_slots.slot - }; - let mut notified = false; if let Some(bank) = bank_forks.read().unwrap().get(slot).cloned() { let results = bank_method(&bank, params); @@ -175,6 +164,7 @@ where notified = true; } } + notified } @@ -287,6 +277,46 @@ impl RpcNotifier { } } +fn filter_block_result_txs( + block: ConfirmedBlock, + last_modified_slot: Slot, + params: &BlockSubscriptionParams, +) -> Option { + let transactions = match params.kind { + BlockSubscriptionKind::All => block.transactions, + BlockSubscriptionKind::MentionsAccountOrProgram(pk) => block + .transactions + .into_iter() + .filter(|tx| tx.transaction.message.account_keys.contains(&pk)) + .collect(), + }; + + if transactions.is_empty() { + if let BlockSubscriptionKind::MentionsAccountOrProgram(_) = params.kind { + return None; + } + } + + let block = ConfirmedBlock { + transactions, + ..block + } + .configure( + params.encoding, + params.transaction_details, + params.show_rewards, + ); + + // If last_modified_slot < last_notified_slot, then the last notif was for a fork. 
+ // That's the risk clients take when subscribing to non-finalized commitments. + // This code lets the logic for dealing with forks live on the client side. + Some(RpcBlockUpdate { + slot: last_modified_slot, + block: Some(block), + err: None, + }) +} + fn filter_account_result( result: Option<(AccountSharedData, Slot)>, params: &AccountSubscriptionParams, @@ -416,14 +446,7 @@ fn initial_last_notified_slot( 0 } } - // last_notified_slot is not utilized for these subscriptions - SubscriptionParams::Logs(_) - | SubscriptionParams::Program(_) - | SubscriptionParams::Signature(_) - | SubscriptionParams::Slot - | SubscriptionParams::SlotsUpdates - | SubscriptionParams::Root - | SubscriptionParams::Vote => 0, + _ => 0, } } @@ -480,12 +503,16 @@ impl Drop for RpcSubscriptions { impl RpcSubscriptions { pub fn new( exit: &Arc, + max_complete_transaction_status_slot: Arc, + blockstore: Arc, bank_forks: Arc>, block_commitment_cache: Arc>, optimistically_confirmed_bank: Arc>, ) -> Self { Self::new_with_config( exit, + max_complete_transaction_status_slot, + blockstore, bank_forks, block_commitment_cache, optimistically_confirmed_bank, @@ -495,12 +522,38 @@ impl RpcSubscriptions { pub fn new_for_tests( exit: &Arc, + max_complete_transaction_status_slot: Arc, + bank_forks: Arc>, + block_commitment_cache: Arc>, + optimistically_confirmed_bank: Arc>, + ) -> Self { + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); + + Self::new_with_config( + exit, + max_complete_transaction_status_slot, + blockstore, + bank_forks, + block_commitment_cache, + optimistically_confirmed_bank, + &PubSubConfig::default_for_tests(), + ) + } + + pub fn new_for_tests_with_blockstore( + exit: &Arc, + max_complete_transaction_status_slot: Arc, + blockstore: Arc, bank_forks: Arc>, block_commitment_cache: Arc>, optimistically_confirmed_bank: Arc>, ) -> Self { Self::new_with_config( exit, + 
max_complete_transaction_status_slot, + blockstore, bank_forks, block_commitment_cache, optimistically_confirmed_bank, @@ -510,6 +563,8 @@ impl RpcSubscriptions { pub fn new_with_config( exit: &Arc, + max_complete_transaction_status_slot: Arc, + blockstore: Arc, bank_forks: Arc>, block_commitment_cache: Arc>, optimistically_confirmed_bank: Arc>, @@ -541,6 +596,8 @@ impl RpcSubscriptions { pool.install(|| { Self::process_notifications( exit_clone, + max_complete_transaction_status_slot, + blockstore, notifier, notification_receiver, subscriptions, @@ -568,11 +625,19 @@ impl RpcSubscriptions { } // For tests only... - pub fn default_with_bank_forks(bank_forks: Arc>) -> Self { + pub fn default_with_bank_forks( + max_complete_transaction_status_slot: Arc, + bank_forks: Arc>, + ) -> Self { + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); Self::new( &Arc::new(AtomicBool::new(false)), + max_complete_transaction_status_slot, + blockstore, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::default())), optimistically_confirmed_bank, @@ -641,6 +706,8 @@ impl RpcSubscriptions { fn process_notifications( exit: Arc, + max_complete_transaction_status_slot: Arc, + blockstore: Arc, notifier: RpcNotifier, notification_receiver: Receiver, mut subscriptions: SubscriptionsTracker, @@ -720,27 +787,32 @@ impl RpcSubscriptions { } } NotificationEntry::Bank(commitment_slots) => { - RpcSubscriptions::notify_accounts_logs_programs_signatures( + const SOURCE: &str = "bank"; + RpcSubscriptions::notify_watchers( + max_complete_transaction_status_slot.clone(), subscriptions.commitment_watchers(), &bank_forks, + &blockstore, &commitment_slots, ¬ifier, - "bank", - ) + SOURCE, + ); } NotificationEntry::Gossip(slot) => { let commitment_slots = CommitmentSlots { highest_confirmed_slot: slot, 
..CommitmentSlots::default() }; - - RpcSubscriptions::notify_accounts_logs_programs_signatures( + const SOURCE: &str = "gossip"; + RpcSubscriptions::notify_watchers( + max_complete_transaction_status_slot.clone(), subscriptions.gossip_watchers(), &bank_forks, + &blockstore, &commitment_slots, ¬ifier, - "gossip", - ) + SOURCE, + ); } NotificationEntry::SignaturesReceived((slot, slot_signatures)) => { for slot_signature in &slot_signatures { @@ -786,100 +858,205 @@ impl RpcSubscriptions { } } - fn notify_accounts_logs_programs_signatures( + fn notify_watchers( + max_complete_transaction_status_slot: Arc, subscriptions: &HashMap>, bank_forks: &Arc>, + blockstore: &Blockstore, commitment_slots: &CommitmentSlots, notifier: &RpcNotifier, source: &'static str, ) { - let mut total_time = Measure::start("notify_accounts_logs_programs_signatures"); + let mut total_time = Measure::start("notify_watchers"); + let num_accounts_found = AtomicUsize::new(0); let num_accounts_notified = AtomicUsize::new(0); + let num_blocks_found = AtomicUsize::new(0); + let num_blocks_notified = AtomicUsize::new(0); + let num_logs_found = AtomicUsize::new(0); let num_logs_notified = AtomicUsize::new(0); - let num_signatures_found = AtomicUsize::new(0); - let num_signatures_notified = AtomicUsize::new(0); - let num_programs_found = AtomicUsize::new(0); let num_programs_notified = AtomicUsize::new(0); + let num_signatures_found = AtomicUsize::new(0); + let num_signatures_notified = AtomicUsize::new(0); + let subscriptions = subscriptions.into_par_iter(); subscriptions.for_each(|(_id, subscription)| { + let slot = if let Some(commitment) = subscription.commitment() { + if commitment.is_finalized() { + Some(commitment_slots.highest_confirmed_root) + } else if commitment.is_confirmed() { + Some(commitment_slots.highest_confirmed_slot) + } else { + Some(commitment_slots.slot) + } + } else { + error!("missing commitment in notify_watchers"); + None + }; match subscription.params() { 
SubscriptionParams::Account(params) => { - let notified = check_commitment_and_notify( - params, - subscription, - bank_forks, - commitment_slots, - |bank, params| bank.get_account_modified_slot(¶ms.pubkey), - filter_account_result, - notifier, - false, - ); - num_accounts_found.fetch_add(1, Ordering::Relaxed); - - if notified { - num_accounts_notified.fetch_add(1, Ordering::Relaxed); + if let Some(slot) = slot { + let notified = check_commitment_and_notify( + params, + subscription, + bank_forks, + slot, + |bank, params| bank.get_account_modified_slot(¶ms.pubkey), + filter_account_result, + notifier, + false, + ); + + if notified { + num_accounts_notified.fetch_add(1, Ordering::Relaxed); + } + } + } + SubscriptionParams::Block(params) => { + num_blocks_found.fetch_add(1, Ordering::Relaxed); + if let Some(slot) = slot { + if let Some(bank) = bank_forks.read().unwrap().get(slot) { + // We're calling it unnotified in this context + // because, logically, it gets set to `last_notified_slot + 1` + // on the final iteration of the loop down below. + // This is used to notify blocks for slots that were + // potentially missed due to upstream transient errors + // that led to this notification not being triggered for + // a slot. + // + // e.g. 
+ // notify_watchers is triggered for Slot 1 + // some time passes + // notify_watchers is triggered for Slot 4 + // this will try to fetch blocks for slots 2, 3, and 4 + // as long as they are ancestors of `slot` + let mut w_last_unnotified_slot = + subscription.last_notified_slot.write().unwrap(); + // would mean it's the first notification for this subscription connection + if *w_last_unnotified_slot == 0 { + *w_last_unnotified_slot = slot; + } + let mut slots_to_notify: Vec<_> = + (*w_last_unnotified_slot..slot).collect(); + let ancestors = bank.proper_ancestors_set(); + slots_to_notify = slots_to_notify + .into_iter() + .filter(|slot| ancestors.contains(slot)) + .collect(); + slots_to_notify.push(slot); + for s in slots_to_notify { + // To avoid skipping a slot that fails this condition, + // caused by non-deterministic concurrency accesses, we + // break out of the loop. Besides if the current `s` is + // greater, then any `s + K` is also greater. + if s > max_complete_transaction_status_slot.load(Ordering::SeqCst) { + break; + } + match blockstore.get_complete_block(s, false) { + Ok(block) => { + if let Some(res) = filter_block_result_txs(block, s, params) + { + notifier.notify( + Response { + context: RpcResponseContext { slot: s }, + value: res, + }, + subscription, + false, + ); + num_blocks_notified.fetch_add(1, Ordering::Relaxed); + // the next time this subscription is notified it will + // try to fetch all slots between (s + 1) to `slot`, inclusively + *w_last_unnotified_slot = s + 1; + } + } + Err(e) => { + // we don't advance `w_last_unnotified_slot` so that + // it'll retry on the next notification trigger + error!("get_complete_block error: {}", e); + notifier.notify( + Response { + context: RpcResponseContext { slot: s }, + value: RpcBlockUpdate { + slot, + block: None, + err: Some(RpcBlockUpdateError::BlockStoreError), + }, + }, + subscription, + false, + ); + } + } + } + } } } SubscriptionParams::Logs(params) => { - let notified = 
check_commitment_and_notify( - params, - subscription, - bank_forks, - commitment_slots, - get_transaction_logs, - filter_logs_results, - notifier, - false, - ); num_logs_found.fetch_add(1, Ordering::Relaxed); - - if notified { - num_logs_notified.fetch_add(1, Ordering::Relaxed); + if let Some(slot) = slot { + let notified = check_commitment_and_notify( + params, + subscription, + bank_forks, + slot, + get_transaction_logs, + filter_logs_results, + notifier, + false, + ); + + if notified { + num_logs_notified.fetch_add(1, Ordering::Relaxed); + } } } SubscriptionParams::Program(params) => { - let notified = check_commitment_and_notify( - params, - subscription, - bank_forks, - commitment_slots, - |bank, params| { - bank.get_program_accounts_modified_since_parent(¶ms.pubkey) - }, - filter_program_results, - notifier, - false, - ); num_programs_found.fetch_add(1, Ordering::Relaxed); - - if notified { - num_programs_notified.fetch_add(1, Ordering::Relaxed); + if let Some(slot) = slot { + let notified = check_commitment_and_notify( + params, + subscription, + bank_forks, + slot, + |bank, params| { + bank.get_program_accounts_modified_since_parent(¶ms.pubkey) + }, + filter_program_results, + notifier, + false, + ); + + if notified { + num_programs_notified.fetch_add(1, Ordering::Relaxed); + } } } SubscriptionParams::Signature(params) => { - let notified = check_commitment_and_notify( - params, - subscription, - bank_forks, - commitment_slots, - |bank, params| { - bank.get_signature_status_processed_since_parent(¶ms.signature) - }, - filter_signature_result, - notifier, - true, // Unsubscribe. 
- ); num_signatures_found.fetch_add(1, Ordering::Relaxed); - - if notified { - num_signatures_notified.fetch_add(1, Ordering::Relaxed); + if let Some(slot) = slot { + let notified = check_commitment_and_notify( + params, + subscription, + bank_forks, + slot, + |bank, params| { + bank.get_signature_status_processed_since_parent(¶ms.signature) + }, + filter_signature_result, + notifier, + true, // Unsubscribe. + ); + + if notified { + num_signatures_notified.fetch_add(1, Ordering::Relaxed); + } } } _ => error!("wrong subscription type in alps map"), @@ -998,13 +1175,14 @@ pub(crate) mod tests { optimistically_confirmed_bank_tracker::{ BankNotification, OptimisticallyConfirmedBank, OptimisticallyConfirmedBankTracker, }, + rpc::create_test_transactions_and_populate_blockstore, rpc_pubsub::RpcSolPubSubInternal, rpc_pubsub_service, }, serial_test::serial, solana_client::rpc_config::{ RpcAccountInfoConfig, RpcProgramAccountsConfig, RpcSignatureSubscribeConfig, - RpcTransactionLogsFilter, + RpcTransactionLogsFilter, {RpcBlockSubscribeConfig, RpcBlockSubscribeFilter}, }, solana_runtime::{ commitment::BlockCommitment, @@ -1017,7 +1195,11 @@ pub(crate) mod tests { stake, system_instruction, system_program, system_transaction, transaction::Transaction, }, - std::{collections::HashSet, sync::atomic::Ordering::Relaxed}, + solana_transaction_status::{TransactionDetails, UiTransactionEncoding}, + std::{ + collections::HashSet, + sync::atomic::{AtomicU64, Ordering::Relaxed}, + }, }; fn make_account_result(lamports: u64, subscription: u64, data: &str) -> serde_json::Value { @@ -1057,8 +1239,10 @@ pub(crate) mod tests { let alice = Keypair::new(); let exit = Arc::new(AtomicBool::new(false)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -1154,6 
+1338,294 @@ pub(crate) mod tests { } } + #[test] + #[serial] + fn test_check_confirmed_block_subscribe() { + let exit = Arc::new(AtomicBool::new(false)); + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore( + &exit, + max_complete_transaction_status_slot, + blockstore.clone(), + bank_forks.clone(), + Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), + optimistically_confirmed_bank, + )); + let (rpc, mut receiver) = rpc_pubsub_service::test_connection(&subscriptions); + let filter = RpcBlockSubscribeFilter::All; + let config = RpcBlockSubscribeConfig { + commitment: Some(CommitmentConfig::confirmed()), + encoding: Some(UiTransactionEncoding::Json), + transaction_details: Some(TransactionDetails::Signatures), + show_rewards: None, + }; + let params = BlockSubscriptionParams { + kind: BlockSubscriptionKind::All, + commitment: config.commitment.unwrap(), + encoding: config.encoding.unwrap(), + transaction_details: config.transaction_details.unwrap(), + show_rewards: config.show_rewards.unwrap_or_default(), + }; + let sub_id = rpc.block_subscribe(filter, Some(config)).unwrap(); + + subscriptions + .control + .assert_subscribed(&SubscriptionParams::Block(params.clone())); + + let bank = bank_forks.read().unwrap().working_bank(); + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + let keypair3 = Keypair::new(); + let keypair4 = Keypair::new(); + let max_complete_transaction_status_slot = 
Arc::new(AtomicU64::new(blockstore.max_root())); + let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore( + vec![&keypair1, &keypair2, &keypair3, &keypair4], + 0, + bank, + blockstore.clone(), + max_complete_transaction_status_slot, + ); + + let slot = 0; + subscriptions.notify_gossip_subscribers(slot); + let actual_resp = receiver.recv(); + let actual_resp = serde_json::from_str::(&actual_resp).unwrap(); + + let block = blockstore.get_complete_block(slot, false).unwrap(); + let block = block.configure(params.encoding, params.transaction_details, false); + let expected_resp = RpcBlockUpdate { + slot, + block: Some(block), + err: None, + }; + let expected_resp = json!({ + "jsonrpc": "2.0", + "method": "blockNotification", + "params": { + "result": { + "context": { "slot": slot }, + "value": expected_resp, + }, + "subscription": 0, + } + }); + assert_eq!(expected_resp, actual_resp); + + // should not trigger since commitment NOT set to finalized + subscriptions.notify_subscribers(CommitmentSlots { + slot, + root: slot, + highest_confirmed_slot: slot, + highest_confirmed_root: slot, + }); + let should_err = receiver.recv_timeout(Duration::from_millis(300)); + assert!(should_err.is_err()); + + rpc.slot_unsubscribe(sub_id).unwrap(); + subscriptions + .control + .assert_unsubscribed(&SubscriptionParams::Block(params)); + } + + #[test] + #[serial] + fn test_check_confirmed_block_subscribe_with_mentions() { + let exit = Arc::new(AtomicBool::new(false)); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore( + &exit, + max_complete_transaction_status_slot, + blockstore.clone(), + bank_forks.clone(), + Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), + optimistically_confirmed_bank, + )); + let (rpc, mut receiver) = rpc_pubsub_service::test_connection(&subscriptions); + let keypair1 = Keypair::new(); + let filter = + RpcBlockSubscribeFilter::MentionsAccountOrProgram(keypair1.pubkey().to_string()); + let config = RpcBlockSubscribeConfig { + commitment: Some(CommitmentConfig::confirmed()), + encoding: Some(UiTransactionEncoding::Json), + transaction_details: Some(TransactionDetails::Signatures), + show_rewards: None, + }; + let params = BlockSubscriptionParams { + kind: BlockSubscriptionKind::MentionsAccountOrProgram(keypair1.pubkey()), + commitment: config.commitment.unwrap(), + encoding: config.encoding.unwrap(), + transaction_details: config.transaction_details.unwrap(), + show_rewards: config.show_rewards.unwrap_or_default(), + }; + let sub_id = rpc.block_subscribe(filter, Some(config)).unwrap(); + + subscriptions + .control + .assert_subscribed(&SubscriptionParams::Block(params.clone())); + + let bank = bank_forks.read().unwrap().working_bank(); + let keypair2 = Keypair::new(); + let keypair3 = Keypair::new(); + let keypair4 = Keypair::new(); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); + let _confirmed_block_signatures = 
create_test_transactions_and_populate_blockstore( + vec![&keypair1, &keypair2, &keypair3, &keypair4], + 0, + bank, + blockstore.clone(), + max_complete_transaction_status_slot, + ); + + let slot = 0; + subscriptions.notify_gossip_subscribers(slot); + let actual_resp = receiver.recv(); + let actual_resp = serde_json::from_str::(&actual_resp).unwrap(); + + // make sure it filtered out the other keypairs + let mut block = blockstore.get_complete_block(slot, false).unwrap(); + block.transactions.retain(|tx| { + tx.transaction + .message + .account_keys + .contains(&keypair1.pubkey()) + }); + let block = block.configure(params.encoding, params.transaction_details, false); + let expected_resp = RpcBlockUpdate { + slot, + block: Some(block), + err: None, + }; + let expected_resp = json!({ + "jsonrpc": "2.0", + "method": "blockNotification", + "params": { + "result": { + "context": { "slot": slot }, + "value": expected_resp, + }, + "subscription": 0, + } + }); + assert_eq!(expected_resp, actual_resp); + + rpc.slot_unsubscribe(sub_id).unwrap(); + subscriptions + .control + .assert_unsubscribed(&SubscriptionParams::Block(params)); + } + + #[test] + #[serial] + fn test_check_finalized_block_subscribe() { + let exit = Arc::new(AtomicBool::new(false)); + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); + let optimistically_confirmed_bank = + OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let ledger_path = get_tmp_ledger_path!(); + let blockstore = Blockstore::open(&ledger_path).unwrap(); + let blockstore = Arc::new(blockstore); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); + let subscriptions = Arc::new(RpcSubscriptions::new_for_tests_with_blockstore( + &exit, + max_complete_transaction_status_slot, + blockstore.clone(), + bank_forks.clone(), + Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), + optimistically_confirmed_bank, + )); + let (rpc, mut receiver) = rpc_pubsub_service::test_connection(&subscriptions); + let filter = RpcBlockSubscribeFilter::All; + let config = RpcBlockSubscribeConfig { + commitment: Some(CommitmentConfig::finalized()), + encoding: Some(UiTransactionEncoding::Json), + transaction_details: Some(TransactionDetails::Signatures), + show_rewards: None, + }; + let params = BlockSubscriptionParams { + kind: BlockSubscriptionKind::All, + commitment: config.commitment.unwrap(), + encoding: config.encoding.unwrap(), + transaction_details: config.transaction_details.unwrap(), + show_rewards: config.show_rewards.unwrap_or_default(), + }; + let sub_id = rpc.block_subscribe(filter, Some(config)).unwrap(); + subscriptions + .control + .assert_subscribed(&SubscriptionParams::Block(params.clone())); + + let bank = bank_forks.read().unwrap().working_bank(); + let keypair1 = Keypair::new(); + let keypair2 = Keypair::new(); + let keypair3 = Keypair::new(); + let keypair4 = Keypair::new(); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::new(blockstore.max_root())); + let _confirmed_block_signatures = create_test_transactions_and_populate_blockstore( + vec![&keypair1, &keypair2, &keypair3, &keypair4], + 0, + bank, + 
blockstore.clone(), + max_complete_transaction_status_slot, + ); + + let slot = 0; + subscriptions.notify_subscribers(CommitmentSlots { + slot, + root: slot, + highest_confirmed_slot: slot, + highest_confirmed_root: slot, + }); + let actual_resp = receiver.recv(); + let actual_resp = serde_json::from_str::(&actual_resp).unwrap(); + + let block = blockstore.get_complete_block(slot, false).unwrap(); + let block = block.configure(params.encoding, params.transaction_details, false); + let expected_resp = RpcBlockUpdate { + slot, + block: Some(block), + err: None, + }; + let expected_resp = json!({ + "jsonrpc": "2.0", + "method": "blockNotification", + "params": { + "result": { + "context": { "slot": slot }, + "value": expected_resp, + }, + "subscription": 0, + } + }); + assert_eq!(expected_resp, actual_resp); + + // should not trigger since commitment set to finalized + subscriptions.notify_gossip_subscribers(slot); + let should_err = receiver.recv_timeout(Duration::from_millis(300)); + assert!(should_err.is_err()); + + rpc.slot_unsubscribe(sub_id).unwrap(); + subscriptions + .control + .assert_unsubscribed(&SubscriptionParams::Block(params)); + } + #[test] #[serial] fn test_check_program_subscribe() { @@ -1185,8 +1657,10 @@ pub(crate) mod tests { let exit = Arc::new(AtomicBool::new(false)); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, @@ -1330,9 +1804,10 @@ pub(crate) mod tests { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); - + let max_complete_transaction_status_slot = 
Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -1499,9 +1974,10 @@ pub(crate) mod tests { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -1609,9 +2085,10 @@ pub(crate) mod tests { let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); let mut pending_optimistically_confirmed_banks = HashSet::new(); - + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -1795,8 +2272,10 @@ pub(crate) mod tests { let exit = Arc::new(AtomicBool::new(false)); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(block_commitment_cache)), optimistically_confirmed_bank, @@ -1967,8 +2446,10 @@ pub(crate) mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = 
Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, @@ -2011,8 +2492,10 @@ pub(crate) mod tests { let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); let optimistically_confirmed_bank = OptimisticallyConfirmedBank::locked_from_bank_forks_root(&bank_forks); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks, Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests())), optimistically_confirmed_bank, @@ -2067,8 +2550,10 @@ pub(crate) mod tests { let mut pending_optimistically_confirmed_banks = HashSet::new(); let exit = Arc::new(AtomicBool::new(false)); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let subscriptions = Arc::new(RpcSubscriptions::new_for_tests( &exit, + max_complete_transaction_status_slot, bank_forks.clone(), Arc::new(RwLock::new(BlockCommitmentCache::new_for_tests_with_slots( 1, 1, @@ -2219,8 +2704,12 @@ pub(crate) mod tests { fn test_total_subscriptions() { let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config(100); let bank = Bank::new_for_tests(&genesis_config); + let max_complete_transaction_status_slot = Arc::new(AtomicU64::default()); let bank_forks = Arc::new(RwLock::new(BankForks::new(bank))); - let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks(bank_forks)); + let subscriptions = Arc::new(RpcSubscriptions::default_with_bank_forks( + max_complete_transaction_status_slot, + bank_forks, + )); let (rpc1, _receiver1) = rpc_pubsub_service::test_connection(&subscriptions); let sub_id1 = rpc1 diff --git a/rpc/src/transaction_status_service.rs b/rpc/src/transaction_status_service.rs index 50d7ecb09fd8a8..25618fe22ce39b 100644 --- a/rpc/src/transaction_status_service.rs +++ b/rpc/src/transaction_status_service.rs @@ -6,7 +6,9 @@ use { blockstore::Blockstore, blockstore_processor::{TransactionStatusBatch, TransactionStatusMessage}, }, - solana_runtime::bank::{Bank, InnerInstructionsList, NonceInfo, TransactionLogMessages}, + solana_runtime::bank::{ + Bank, DurableNonceFee, TransactionExecutionDetails, TransactionExecutionResult, + }, solana_transaction_status::{ extract_and_fmt_memos, InnerInstructions, Reward, TransactionStatusMeta, }, @@ -67,64 +69,51 @@ impl TransactionStatusService { TransactionStatusMessage::Batch(TransactionStatusBatch { bank, transactions, - statuses, + execution_results, balances, token_balances, - inner_instructions, - transaction_logs, rent_debits, }) => { let slot = bank.slot(); - let inner_instructions_iter: Box< - dyn Iterator>, - > = if let Some(inner_instructions) = inner_instructions { - Box::new(inner_instructions.into_iter()) - } else { - Box::new(std::iter::repeat_with(|| None)) - }; - let transaction_logs_iter: Box< - dyn Iterator>, - > = if let Some(transaction_logs) = transaction_logs { - Box::new(transaction_logs.into_iter()) - } else { - Box::new(std::iter::repeat_with(|| None)) - }; for ( transaction, - (status, nonce), + execution_result, pre_balances, post_balances, pre_token_balances, 
post_token_balances, - inner_instructions, - log_messages, rent_debits, ) in izip!( transactions, - statuses, + execution_results, balances.pre_balances, balances.post_balances, token_balances.pre_token_balances, token_balances.post_token_balances, - inner_instructions_iter, - transaction_logs_iter, rent_debits, ) { - if Bank::can_commit(&status) { - let lamports_per_signature = nonce - .map(|nonce| nonce.lamports_per_signature()) - .unwrap_or_else(|| { - bank.get_lamports_per_signature_for_blockhash( - transaction.message().recent_blockhash(), - ) - }) - .expect("lamports_per_signature must be available"); + if let TransactionExecutionResult::Executed(details) = execution_result { + let TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + durable_nonce_fee, + } = details; + let lamports_per_signature = match durable_nonce_fee { + Some(DurableNonceFee::Valid(lamports_per_signature)) => { + Some(lamports_per_signature) + } + Some(DurableNonceFee::Invalid) => None, + None => bank.get_lamports_per_signature_for_blockhash( + transaction.message().recent_blockhash(), + ), + } + .expect("lamports_per_signature must be available"); let fee = Bank::get_fee_for_message_with_lamports_per_signature( transaction.message(), lamports_per_signature, ); - let tx_account_locks = - transaction.get_account_locks(bank.demote_program_write_locks()); + let tx_account_locks = transaction.get_account_locks_unchecked(); let inner_instructions = inner_instructions.map(|inner_instructions| { inner_instructions @@ -332,18 +321,21 @@ pub(crate) mod tests { let mut rent_debits = RentDebits::default(); rent_debits.insert(&pubkey, 123, 456); - let transaction_result = ( - Ok(()), - Some( - NonceFull::from_partial( - rollback_partial, - &SanitizedMessage::Legacy(message), - &[(pubkey, nonce_account)], - &rent_debits, - ) - .unwrap(), - ), - ); + let transaction_result = + TransactionExecutionResult::Executed(TransactionExecutionDetails { + status: Ok(()), + log_messages: 
None, + inner_instructions: None, + durable_nonce_fee: Some(DurableNonceFee::from( + &NonceFull::from_partial( + rollback_partial, + &SanitizedMessage::Legacy(message), + &[(pubkey, nonce_account)], + &rent_debits, + ) + .unwrap(), + )), + }); let balances = TransactionBalancesSet { pre_balances: vec![vec![123456]], @@ -375,11 +367,9 @@ pub(crate) mod tests { let transaction_status_batch = TransactionStatusBatch { bank, transactions: vec![transaction], - statuses: vec![transaction_result], + execution_results: vec![transaction_result], balances, token_balances, - inner_instructions: None, - transaction_logs: None, rent_debits: vec![rent_debits], }; diff --git a/runtime/Cargo.toml b/runtime/Cargo.toml index 8a1260fff8e15d..ef6bd9eaf3aa6d 100644 --- a/runtime/Cargo.toml +++ b/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-runtime" -version = "1.9.0" +version = "1.9.4" description = "Solana runtime" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -26,25 +26,28 @@ lazy_static = "1.4.0" log = "0.4.14" memmap2 = "0.5.0" num_cpus = "1.13.0" +num-derive = { version = "0.3" } +num-traits = { version = "0.2" } ouroboros = "0.13.0" rand = "0.7.0" rayon = "1.5.1" regex = "1.5.4" serde = { version = "1.0.130", features = ["rc"] } serde_derive = "1.0.103" -solana-config-program = { path = "../programs/config", version = "=1.9.0" } -solana-compute-budget-program = { path = "../programs/compute-budget", version = "=1.9.0" } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-bucket-map = { path = "../bucket_map", version = "=1.9.0" } -solana-program-runtime = { path = "../program-runtime", version = "=1.9.0" } -solana-rayon-threadlimit = { path 
= "../rayon-threadlimit", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-address-lookup-table-program = { path = "../programs/address-lookup-table", version = "=1.9.4" } +solana-config-program = { path = "../programs/config", version = "=1.9.4" } +solana-compute-budget-program = { path = "../programs/compute-budget", version = "=1.9.4" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-bucket-map = { path = "../bucket_map", version = "=1.9.4" } +solana-program-runtime = { path = "../program-runtime", version = "=1.9.4" } +solana-rayon-threadlimit = { path = "../rayon-threadlimit", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } symlink = "0.1.0" tar = "0.4.37" tempfile = "3.2.0" diff --git a/runtime/src/accounts.rs b/runtime/src/accounts.rs index ee78fabbe2703d..e06819bf9370a8 100644 --- a/runtime/src/accounts.rs +++ b/runtime/src/accounts.rs @@ -36,7 +36,7 @@ use { pubkey::Pubkey, system_program, sysvar::{self, instructions::construct_instructions_data}, - transaction::{Result, SanitizedTransaction, TransactionError}, + transaction::{Result, SanitizedTransaction, TransactionAccountLocks, TransactionError}, }, std::{ cmp::Reverse, @@ -209,9 +209,8 @@ impl Accounts { fn construct_instructions_account( message: &SanitizedMessage, is_owned_by_sysvar: bool, - demote_program_write_locks: bool, ) -> AccountSharedData { - let data = 
construct_instructions_data(message, demote_program_write_locks); + let data = construct_instructions_data(message); let owner = if is_owned_by_sysvar { sysvar::id() } else { @@ -247,9 +246,6 @@ impl Accounts { let mut account_deps = Vec::with_capacity(message.account_keys_len()); let mut rent_debits = RentDebits::default(); let rent_for_sysvars = feature_set.is_active(&feature_set::rent_for_sysvars::id()); - let demote_program_write_locks = - feature_set.is_active(&feature_set::demote_program_write_locks::id()); - for (i, key) in message.account_keys_iter().enumerate() { let account = if !message.is_non_loader_key(i) { // Fill in an empty account for the program slots. @@ -264,14 +260,13 @@ impl Accounts { message, feature_set .is_active(&feature_set::instructions_sysvar_owned_by_sysvar::id()), - demote_program_write_locks, ) } else { let (account, rent) = self .accounts_db .load_with_fixed_root(ancestors, key) .map(|(mut account, _)| { - if message.is_writable(i, demote_program_write_locks) { + if message.is_writable(i) { let rent_due = rent_collector.collect_from_existing_account( key, &mut account, @@ -286,10 +281,7 @@ impl Accounts { .unwrap_or_default(); if bpf_loader_upgradeable::check_id(account.owner()) { - if demote_program_write_locks - && message.is_writable(i, demote_program_write_locks) - && !message.is_upgradeable_loader_present() - { + if message.is_writable(i) && !message.is_upgradeable_loader_present() { error_counters.invalid_writable_account += 1; return Err(TransactionError::InvalidWritableAccount); } @@ -315,10 +307,7 @@ impl Accounts { return Err(TransactionError::InvalidProgramForExecution); } } - } else if account.executable() - && demote_program_write_locks - && message.is_writable(i, demote_program_write_locks) - { + } else if account.executable() && message.is_writable(i) { error_counters.invalid_writable_account += 1; return Err(TransactionError::InvalidWritableAccount); } @@ -963,15 +952,11 @@ impl Accounts { pub fn lock_accounts<'a>( 
&self, txs: impl Iterator, - demote_program_write_locks: bool, + feature_set: &FeatureSet, ) -> Vec> { - let keys: Vec<_> = txs - .map(|tx| tx.get_account_locks(demote_program_write_locks)) - .collect(); - let account_locks = &mut self.account_locks.lock().unwrap(); - keys.into_iter() - .map(|keys| self.lock_account(account_locks, keys.writable, keys.readonly)) - .collect() + let tx_account_locks_results: Vec> = + txs.map(|tx| tx.get_account_locks(feature_set)).collect(); + self.lock_accounts_inner(tx_account_locks_results) } #[must_use] @@ -980,21 +965,33 @@ impl Accounts { &self, txs: impl Iterator, results: impl Iterator>, - demote_program_write_locks: bool, + feature_set: &FeatureSet, ) -> Vec> { - let key_results: Vec<_> = txs + let tx_account_locks_results: Vec> = txs .zip(results) .map(|(tx, result)| match result { - Ok(()) => Ok(tx.get_account_locks(demote_program_write_locks)), - Err(e) => Err(e), + Ok(()) => tx.get_account_locks(feature_set), + Err(err) => Err(err), }) .collect(); + self.lock_accounts_inner(tx_account_locks_results) + } + + #[must_use] + fn lock_accounts_inner( + &self, + tx_account_locks_results: Vec>, + ) -> Vec> { let account_locks = &mut self.account_locks.lock().unwrap(); - key_results + tx_account_locks_results .into_iter() - .map(|key_result| match key_result { - Ok(keys) => self.lock_account(account_locks, keys.writable, keys.readonly), - Err(e) => Err(e), + .map(|tx_account_locks_result| match tx_account_locks_result { + Ok(tx_account_locks) => self.lock_account( + account_locks, + tx_account_locks.writable, + tx_account_locks.readonly, + ), + Err(err) => Err(err), }) .collect() } @@ -1005,17 +1002,18 @@ impl Accounts { &self, txs: impl Iterator, results: &[Result<()>], - demote_program_write_locks: bool, ) { let keys: Vec<_> = txs .zip(results) .filter_map(|(tx, res)| match res { - Err(TransactionError::AccountInUse) => None, - Err(TransactionError::SanitizeFailure) => None, - Err(TransactionError::AccountLoadedTwice) => None, - 
Err(TransactionError::WouldExceedMaxBlockCostLimit) => None, - Err(TransactionError::WouldExceedMaxAccountCostLimit) => None, - _ => Some(tx.get_account_locks(demote_program_write_locks)), + Err(TransactionError::AccountLoadedTwice) + | Err(TransactionError::AccountInUse) + | Err(TransactionError::SanitizeFailure) + | Err(TransactionError::TooManyAccountLocks) + | Err(TransactionError::WouldExceedMaxBlockCostLimit) + | Err(TransactionError::WouldExceedMaxAccountCostLimit) + | Err(TransactionError::WouldExceedMaxAccountDataCostLimit) => None, + _ => Some(tx.get_account_locks_unchecked()), }) .collect(); let mut account_locks = self.account_locks.lock().unwrap(); @@ -1038,7 +1036,6 @@ impl Accounts { blockhash: &Hash, lamports_per_signature: u64, rent_for_sysvars: bool, - demote_program_write_locks: bool, leave_nonce_on_success: bool, ) { let accounts_to_store = self.collect_accounts_to_store( @@ -1049,7 +1046,6 @@ impl Accounts { blockhash, lamports_per_signature, rent_for_sysvars, - demote_program_write_locks, leave_nonce_on_success, ); self.accounts_db.store_cached(slot, &accounts_to_store); @@ -1077,30 +1073,38 @@ impl Accounts { blockhash: &Hash, lamports_per_signature: u64, rent_for_sysvars: bool, - demote_program_write_locks: bool, leave_nonce_on_success: bool, ) -> Vec<(&'a Pubkey, &'a AccountSharedData)> { let mut accounts = Vec::with_capacity(load_results.len()); - for (i, ((tx_load_result, _), tx)) in load_results.iter_mut().zip(txs).enumerate() { + for (i, ((tx_load_result, nonce), tx)) in load_results.iter_mut().zip(txs).enumerate() { if tx_load_result.is_err() { // Don't store any accounts if tx failed to load continue; } - let (execution_result, nonce) = &execution_results[i]; - let maybe_nonce = match (execution_result, nonce) { - (Ok(_), Some(nonce)) => { + let execution_status = match &execution_results[i] { + TransactionExecutionResult::Executed(details) => &details.status, + // Don't store any accounts if tx wasn't executed + 
TransactionExecutionResult::NotExecuted(_) => continue, + }; + + let maybe_nonce = match (execution_status, &*nonce) { + (Ok(()), Some(nonce)) => { if leave_nonce_on_success { None } else { Some((nonce, false /* rollback */)) } } - (Err(TransactionError::InstructionError(_, _)), Some(nonce)) => { + (Err(_), Some(nonce)) => { Some((nonce, true /* rollback */)) } - (Ok(_), _) => None, // Success, don't do any additional nonce processing - (Err(_), _) => continue, // Not nonce, don't store any accounts + (Ok(_), None) => None, // Success, don't do any additional nonce processing + (Err(_), None) => { + // Fees for failed transactions which don't use durable nonces are + // deducted in Bank::filter_program_errors_and_collect_fee + continue; + } }; let message = tx.message(); @@ -1114,18 +1118,18 @@ impl Accounts { fee_payer_index = Some(i); } let is_fee_payer = Some(i) == fee_payer_index; - if message.is_writable(i, demote_program_write_locks) { + if message.is_writable(i) { let is_nonce_account = prepare_if_nonce_account( address, account, - execution_result, + execution_status, is_fee_payer, maybe_nonce, blockhash, lamports_per_signature, ); - if execution_result.is_ok() || is_nonce_account || is_fee_payer { + if execution_status.is_ok() || is_nonce_account || is_fee_payer { if account.rent_epoch() == INITIAL_RENT_EPOCH { let rent = rent_collector.collect_from_created_account( address, @@ -1234,19 +1238,22 @@ pub fn update_accounts_bench(accounts: &Accounts, pubkeys: &[Pubkey], slot: u64) mod tests { use { super::*, - crate::rent_collector::RentCollector, + crate::{ + bank::{DurableNonceFee, TransactionExecutionDetails}, + rent_collector::RentCollector, + }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, epoch_schedule::EpochSchedule, genesis_config::ClusterType, hash::Hash, instruction::{CompiledInstruction, InstructionError}, - message::Message, + message::{Message, MessageHeader}, nonce, nonce_account, rent::Rent, signature::{keypair_from_seed, 
signers::Signers, Keypair, Signer}, system_instruction, system_program, - transaction::Transaction, + transaction::{Transaction, MAX_TX_ACCOUNT_LOCKS}, }, std::{ convert::TryFrom, @@ -1267,6 +1274,18 @@ mod tests { )) } + fn new_execution_result( + status: Result<()>, + nonce: Option<&NonceFull>, + ) -> TransactionExecutionResult { + TransactionExecutionResult::Executed(TransactionExecutionDetails { + status, + log_messages: None, + inner_instructions: None, + durable_nonce_fee: nonce.map(DurableNonceFee::from), + }) + } + fn load_accounts_with_fee_and_rent( tx: Transaction, ka: &[(Pubkey, AccountSharedData)], @@ -2124,6 +2143,109 @@ mod tests { accounts.bank_hash_at(1); } + #[test] + fn test_lock_accounts_with_duplicates() { + let accounts = Accounts::new_with_config_for_tests( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + AccountShrinkThreshold::default(), + ); + + let keypair = Keypair::new(); + let message = Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![keypair.pubkey(), keypair.pubkey()], + ..Message::default() + }; + + let tx = new_sanitized_tx(&[&keypair], message, Hash::default()); + let results = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled()); + assert_eq!(results[0], Err(TransactionError::AccountLoadedTwice)); + } + + #[test] + fn test_lock_accounts_with_too_many_accounts() { + let accounts = Accounts::new_with_config_for_tests( + Vec::new(), + &ClusterType::Development, + AccountSecondaryIndexes::default(), + false, + AccountShrinkThreshold::default(), + ); + + let keypair = Keypair::new(); + + // Allow up to MAX_TX_ACCOUNT_LOCKS + { + let num_account_keys = MAX_TX_ACCOUNT_LOCKS; + let mut account_keys: Vec<_> = (0..num_account_keys) + .map(|_| Pubkey::new_unique()) + .collect(); + account_keys[0] = keypair.pubkey(); + let message = Message { + header: MessageHeader { + num_required_signatures: 1, + 
..MessageHeader::default() + }, + account_keys, + ..Message::default() + }; + + let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; + let results = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled()); + assert_eq!(results[0], Ok(())); + accounts.unlock_accounts(txs.iter(), &results); + } + + // Allow over MAX_TX_ACCOUNT_LOCKS before feature activation + { + let num_account_keys = MAX_TX_ACCOUNT_LOCKS + 1; + let mut account_keys: Vec<_> = (0..num_account_keys) + .map(|_| Pubkey::new_unique()) + .collect(); + account_keys[0] = keypair.pubkey(); + let message = Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys, + ..Message::default() + }; + + let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; + let results = accounts.lock_accounts(txs.iter(), &FeatureSet::default()); + assert_eq!(results[0], Ok(())); + accounts.unlock_accounts(txs.iter(), &results); + } + + // Disallow over MAX_TX_ACCOUNT_LOCKS after feature activation + { + let num_account_keys = MAX_TX_ACCOUNT_LOCKS + 1; + let mut account_keys: Vec<_> = (0..num_account_keys) + .map(|_| Pubkey::new_unique()) + .collect(); + account_keys[0] = keypair.pubkey(); + let message = Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys, + ..Message::default() + }; + + let txs = vec![new_sanitized_tx(&[&keypair], message, Hash::default())]; + let results = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled()); + assert_eq!(results[0], Err(TransactionError::TooManyAccountLocks)); + } + } + #[test] fn test_accounts_locks() { let keypair0 = Keypair::new(); @@ -2148,8 +2270,6 @@ mod tests { accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2); accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3); - let demote_program_write_locks = true; - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = 
Message::new_with_compiled_instructions( 1, @@ -2160,7 +2280,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx.clone()].iter(), demote_program_write_locks); + let results0 = accounts.lock_accounts([tx.clone()].iter(), &FeatureSet::all_enabled()); assert!(results0[0].is_ok()); assert_eq!( @@ -2195,7 +2315,7 @@ mod tests { ); let tx1 = new_sanitized_tx(&[&keypair1], message, Hash::default()); let txs = vec![tx0, tx1]; - let results1 = accounts.lock_accounts(txs.iter(), demote_program_write_locks); + let results1 = accounts.lock_accounts(txs.iter(), &FeatureSet::all_enabled()); assert!(results1[0].is_ok()); // Read-only account (keypair1) can be referenced multiple times assert!(results1[1].is_err()); // Read-only account (keypair1) cannot also be locked as writable @@ -2210,8 +2330,8 @@ mod tests { 2 ); - accounts.unlock_accounts([tx].iter(), &results0, demote_program_write_locks); - accounts.unlock_accounts(txs.iter(), &results1, demote_program_write_locks); + accounts.unlock_accounts([tx].iter(), &results0); + accounts.unlock_accounts(txs.iter(), &results1); let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -2222,7 +2342,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair1], message, Hash::default()); - let results2 = accounts.lock_accounts([tx].iter(), demote_program_write_locks); + let results2 = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled()); assert!(results2[0].is_ok()); // Now keypair1 account can be locked as writable // Check that read-only lock with zero references is deleted @@ -2259,8 +2379,6 @@ mod tests { accounts.store_slow_uncached(0, &keypair1.pubkey(), &account1); accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2); - let demote_program_write_locks = true; - let accounts_arc = Arc::new(accounts); let instructions = 
vec![CompiledInstruction::new(2, &(), vec![0, 1])]; @@ -2295,13 +2413,13 @@ mod tests { let txs = vec![writable_tx.clone()]; let results = accounts_clone .clone() - .lock_accounts(txs.iter(), demote_program_write_locks); + .lock_accounts(txs.iter(), &FeatureSet::all_enabled()); for result in results.iter() { if result.is_ok() { counter_clone.clone().fetch_add(1, Ordering::SeqCst); } } - accounts_clone.unlock_accounts(txs.iter(), &results, demote_program_write_locks); + accounts_clone.unlock_accounts(txs.iter(), &results); if exit_clone.clone().load(Ordering::Relaxed) { break; } @@ -2312,13 +2430,13 @@ mod tests { let txs = vec![readonly_tx.clone()]; let results = accounts_arc .clone() - .lock_accounts(txs.iter(), demote_program_write_locks); + .lock_accounts(txs.iter(), &FeatureSet::all_enabled()); if results[0].is_ok() { let counter_value = counter_clone.clone().load(Ordering::SeqCst); thread::sleep(time::Duration::from_millis(50)); assert_eq!(counter_value, counter_clone.clone().load(Ordering::SeqCst)); } - accounts_arc.unlock_accounts(txs.iter(), &results, demote_program_write_locks); + accounts_arc.unlock_accounts(txs.iter(), &results); thread::sleep(time::Duration::from_millis(50)); } exit.store(true, Ordering::Relaxed); @@ -2348,8 +2466,6 @@ mod tests { accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2); accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3); - let demote_program_write_locks = true; - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -2360,7 +2476,7 @@ mod tests { instructions, ); let tx = new_sanitized_tx(&[&keypair0], message, Hash::default()); - let results0 = accounts.lock_accounts([tx].iter(), demote_program_write_locks); + let results0 = accounts.lock_accounts([tx].iter(), &FeatureSet::all_enabled()); assert!(results0[0].is_ok()); // Instruction program-id account demoted to readonly @@ -2413,8 +2529,6 @@ mod tests { 
accounts.store_slow_uncached(0, &keypair2.pubkey(), &account2); accounts.store_slow_uncached(0, &keypair3.pubkey(), &account3); - let demote_program_write_locks = true; - let instructions = vec![CompiledInstruction::new(2, &(), vec![0, 1])]; let message = Message::new_with_compiled_instructions( 1, @@ -2456,7 +2570,7 @@ mod tests { let results = accounts.lock_accounts_with_results( txs.iter(), qos_results.into_iter(), - demote_program_write_locks, + &FeatureSet::all_enabled(), ); assert!(results[0].is_ok()); // Read-only account (keypair0) can be referenced multiple times @@ -2483,7 +2597,7 @@ mod tests { .get(&keypair2.pubkey()) .is_none()); - accounts.unlock_accounts(txs.iter(), &results, demote_program_write_locks); + accounts.unlock_accounts(txs.iter(), &results); // check all locks to be removed assert!(accounts @@ -2578,16 +2692,15 @@ mod tests { .insert_new_readonly(&pubkey); } let txs = vec![tx0, tx1]; - let programs = vec![(Ok(()), None), (Ok(()), None)]; + let execution_results = vec![new_execution_result(Ok(()), None); 2]; let collected_accounts = accounts.collect_accounts_to_store( &txs, - &programs, + &execution_results, loaded.as_mut_slice(), &rent_collector, &Hash::default(), 0, true, - true, // demote_program_write_locks true, // leave_nonce_on_success ); assert_eq!(collected_accounts.len(), 2); @@ -3002,22 +3115,21 @@ mod tests { AccountShrinkThreshold::default(), ); let txs = vec![tx]; - let programs = vec![( + let execution_results = vec![new_execution_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), - nonce, + nonce.as_ref(), )]; let collected_accounts = accounts.collect_accounts_to_store( &txs, - &programs, + &execution_results, loaded.as_mut_slice(), &rent_collector, &next_blockhash, 0, true, - true, // demote_program_write_locks true, // leave_nonce_on_success ); assert_eq!(collected_accounts.len(), 2); @@ -3113,22 +3225,21 @@ mod tests { AccountShrinkThreshold::default(), ); let txs = vec![tx]; - let 
programs = vec![( + let execution_results = vec![new_execution_result( Err(TransactionError::InstructionError( 1, InstructionError::InvalidArgument, )), - nonce, + nonce.as_ref(), )]; let collected_accounts = accounts.collect_accounts_to_store( &txs, - &programs, + &execution_results, loaded.as_mut_slice(), &rent_collector, &next_blockhash, 0, true, - true, // demote_program_write_locks true, // leave_nonce_on_success ); assert_eq!(collected_accounts.len(), 1); diff --git a/runtime/src/accounts_db.rs b/runtime/src/accounts_db.rs index df9078bd64a12e..5e3ca0c5f8bb57 100644 --- a/runtime/src/accounts_db.rs +++ b/runtime/src/accounts_db.rs @@ -220,6 +220,19 @@ pub struct ErrorCounters { pub invalid_writable_account: usize, } +#[derive(Debug, Default, Clone, Copy)] +pub struct IndexGenerationInfo { + pub accounts_data_len: u64, +} + +#[derive(Debug, Default, Clone, Copy)] +struct SlotIndexGenerationInfo { + insert_time_us: u64, + num_accounts: u64, + num_accounts_rent_exempt: u64, + accounts_data_len: u64, +} + #[derive(Default, Debug)] struct GenerateIndexTimings { pub index_time: u64, @@ -234,6 +247,7 @@ struct GenerateIndexTimings { pub index_flush_us: u64, pub rent_exempt: u64, pub total_duplicates: u64, + pub accounts_data_len_dedup_time_us: u64, } #[derive(Default, Debug, PartialEq)] @@ -280,6 +294,11 @@ impl GenerateIndexTimings { i64 ), ("total_items", self.total_items as i64, i64), + ( + "accounts_data_len_dedup_time_us", + self.accounts_data_len_dedup_time_us as i64, + i64 + ), ); } } @@ -6657,21 +6676,21 @@ impl AccountsDb { accounts_map } - /// return time_us, # accts rent exempt, total # accts fn generate_index_for_slot<'a>( &self, accounts_map: GenerateIndexAccountsMap<'a>, slot: &Slot, rent_collector: &RentCollector, - ) -> (u64, u64, u64) { + ) -> SlotIndexGenerationInfo { if accounts_map.is_empty() { - return (0, 0, 0); + return SlotIndexGenerationInfo::default(); } let secondary = !self.account_indexes.is_empty(); - let mut rent_exempt = 0; - let len 
= accounts_map.len(); + let mut accounts_data_len = 0; + let mut num_accounts_rent_exempt = 0; + let num_accounts = accounts_map.len(); let items = accounts_map.into_iter().map( |( pubkey, @@ -6689,12 +6708,13 @@ impl AccountsDb { &self.account_indexes, ); } + accounts_data_len += stored_account.data().len() as u64; if !rent_collector.should_collect_rent(&pubkey, &stored_account, false) || { let (_rent_due, exempt) = rent_collector.get_rent_due(&stored_account); exempt } { - rent_exempt += 1; + num_accounts_rent_exempt += 1; } ( @@ -6709,9 +6729,9 @@ impl AccountsDb { }, ); - let (dirty_pubkeys, insert_us) = self + let (dirty_pubkeys, insert_time_us) = self .accounts_index - .insert_new_if_missing_into_primary_index(*slot, len, items); + .insert_new_if_missing_into_primary_index(*slot, num_accounts, items); // dirty_pubkeys will contain a pubkey if an item has multiple rooted entries for // a given pubkey. If there is just a single item, there is no cleaning to @@ -6719,7 +6739,12 @@ impl AccountsDb { if !dirty_pubkeys.is_empty() { self.uncleaned_pubkeys.insert(*slot, dirty_pubkeys); } - (insert_us, rent_exempt, len as u64) + SlotIndexGenerationInfo { + insert_time_us, + num_accounts: num_accounts as u64, + num_accounts_rent_exempt, + accounts_data_len, + } } fn filler_unique_id_bytes() -> usize { @@ -6855,7 +6880,7 @@ impl AccountsDb { limit_load_slot_count_from_snapshot: Option, verify: bool, genesis_config: &GenesisConfig, - ) { + ) -> IndexGenerationInfo { let mut slots = self.storage.all_slots(); #[allow(clippy::stable_sort_primitive)] slots.sort(); @@ -6870,6 +6895,7 @@ impl AccountsDb { genesis_config.slots_per_year(), &genesis_config.rent, ); + let accounts_data_len = AtomicU64::new(0); // pass == 0 always runs and generates the index // pass == 1 only runs if verify == true. 
@@ -6915,10 +6941,16 @@ impl AccountsDb { let insert_us = if pass == 0 { // generate index - let (insert_us, rent_exempt_this_slot, total_this_slot) = - self.generate_index_for_slot(accounts_map, slot, &rent_collector); + let SlotIndexGenerationInfo { + insert_time_us: insert_us, + num_accounts: total_this_slot, + num_accounts_rent_exempt: rent_exempt_this_slot, + accounts_data_len: accounts_data_len_this_slot, + } = self.generate_index_for_slot(accounts_map, slot, &rent_collector); rent_exempt.fetch_add(rent_exempt_this_slot, Ordering::Relaxed); total_duplicates.fetch_add(total_this_slot, Ordering::Relaxed); + accounts_data_len + .fetch_add(accounts_data_len_this_slot, Ordering::Relaxed); insert_us } else { // verify index matches expected and measure the time to get all items @@ -6972,6 +7004,30 @@ impl AccountsDb { }) .sum(); + // subtract data.len() from accounts_data_len for all old accounts that are in the index twice + let mut accounts_data_len_dedup_timer = + Measure::start("handle accounts data len duplicates"); + if pass == 0 { + let mut unique_pubkeys = HashSet::::default(); + self.uncleaned_pubkeys.iter().for_each(|entry| { + entry.value().iter().for_each(|pubkey| { + unique_pubkeys.insert(*pubkey); + }) + }); + let accounts_data_len_from_duplicates = unique_pubkeys + .into_iter() + .collect::>() + .par_chunks(4096) + .map(|pubkeys| self.pubkeys_to_duplicate_accounts_data_len(pubkeys)) + .sum(); + accounts_data_len.fetch_sub(accounts_data_len_from_duplicates, Ordering::Relaxed); + info!( + "accounts data len: {}", + accounts_data_len.load(Ordering::Relaxed) + ); + } + accounts_data_len_dedup_timer.stop(); + let storage_info_timings = storage_info_timings.into_inner().unwrap(); let mut index_flush_us = 0; @@ -6996,6 +7052,7 @@ impl AccountsDb { storage_size_accounts_map_us: storage_info_timings.storage_size_accounts_map_us, storage_size_accounts_map_flatten_us: storage_info_timings .storage_size_accounts_map_flatten_us, + accounts_data_len_dedup_time_us: 
accounts_data_len_dedup_timer.as_us(), ..GenerateIndexTimings::default() }; @@ -7009,6 +7066,43 @@ impl AccountsDb { } timings.report(); } + + IndexGenerationInfo { + accounts_data_len: accounts_data_len.load(Ordering::Relaxed), + } + } + + /// Used during generate_index() to get the _duplicate_ accounts data len from the given pubkeys + fn pubkeys_to_duplicate_accounts_data_len(&self, pubkeys: &[Pubkey]) -> u64 { + let mut accounts_data_len_from_duplicates = 0; + pubkeys.iter().for_each(|pubkey| { + if let Some(entry) = self.accounts_index.get_account_read_entry(pubkey) { + let slot_list = entry.slot_list(); + if slot_list.len() < 2 { + return; + } + // Only the account data len in the highest slot should be used, and the rest are + // duplicates. So sort the slot list in descending slot order, skip the first + // item, then sum up the remaining data len, which are the duplicates. + let mut slot_list = slot_list.clone(); + slot_list + .select_nth_unstable_by(0, |a, b| b.0.cmp(&a.0)) + .2 + .iter() + .for_each(|(slot, account_info)| { + let maybe_storage_entry = self + .storage + .get_account_storage_entry(*slot, account_info.store_id); + let mut accessor = LoadedAccountAccessor::Stored( + maybe_storage_entry.map(|entry| (entry, account_info.offset)), + ); + let loaded_account = accessor.check_and_get_loaded_account(); + let account = loaded_account.take_account(); + accounts_data_len_from_duplicates += account.data().len(); + }); + } + }); + accounts_data_len_from_duplicates as u64 } fn update_storage_info( diff --git a/runtime/src/accounts_index.rs b/runtime/src/accounts_index.rs index 18b03c8c0a385a..4a71f074125852 100644 --- a/runtime/src/accounts_index.rs +++ b/runtime/src/accounts_index.rs @@ -44,7 +44,7 @@ pub const ACCOUNTS_INDEX_CONFIG_FOR_TESTING: AccountsIndexConfig = AccountsIndex bins: Some(BINS_FOR_TESTING), flush_threads: Some(FLUSH_THREADS_TESTING), drives: None, - index_limit_mb: Some(1), + index_limit_mb: None, ages_to_stay_in_cache: None, 
scan_results_limit_bytes: None, }; @@ -840,7 +840,13 @@ pub struct AccountsIndex { impl AccountsIndex { pub fn default_for_tests() -> Self { - Self::new(Some(ACCOUNTS_INDEX_CONFIG_FOR_TESTING)) + let mut config = ACCOUNTS_INDEX_CONFIG_FOR_TESTING; + if let Ok(limit) = std::env::var("SOLANA_TEST_ACCOUNTS_INDEX_MEMORY_LIMIT_MB") { + // allocate with disk buckets + config.index_limit_mb = Some(limit.parse::().unwrap()); + } + + Self::new(Some(config)) } pub fn new(config: Option) -> Self { diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index bb36bf19255495..de4ea9fde343d9 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -37,7 +37,10 @@ use solana_sdk::recent_blockhashes_account; use { crate::{ - accounts::{AccountAddressFilter, Accounts, TransactionAccounts, TransactionLoadResult}, + accounts::{ + AccountAddressFilter, Accounts, LoadedTransaction, TransactionAccounts, + TransactionLoadResult, + }, accounts_db::{ AccountShrinkThreshold, AccountsDbConfig, ErrorCounters, SnapshotStorages, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, @@ -56,7 +59,7 @@ use { calculate_stake_weighted_timestamp, MaxAllowableDrift, MAX_ALLOWABLE_DRIFT_PERCENTAGE, MAX_ALLOWABLE_DRIFT_PERCENTAGE_FAST, MAX_ALLOWABLE_DRIFT_PERCENTAGE_SLOW, }, - stakes::Stakes, + stakes::{InvalidCacheEntryReason, Stakes, StakesCache}, status_cache::{SlotDelta, StatusCache}, system_instruction_processor::{get_system_account_kind, SystemAccountKind}, transaction_batch::TransactionBatch, @@ -74,7 +77,10 @@ use { solana_metrics::{inc_new_counter_debug, inc_new_counter_info}, solana_program_runtime::{ instruction_recorder::InstructionRecorder, - invoke_context::{BuiltinProgram, Executor, Executors, ProcessInstructionWithContext}, + invoke_context::{ + BuiltinProgram, Executor, Executors, ProcessInstructionWithContext, + TransactionAccountRefCells, + }, log_collector::LogCollector, timings::ExecuteDetailsTimings, }, @@ -118,7 +124,7 @@ use { slot_hashes::SlotHashes, 
slot_history::SlotHistory, system_transaction, - sysvar::{self}, + sysvar::{self, Sysvar, SysvarId}, timing::years_as_slots, transaction::{ Result, SanitizedTransaction, Transaction, TransactionError, @@ -143,7 +149,10 @@ use { ptr, rc::Rc, sync::{ - atomic::{AtomicBool, AtomicU64, Ordering::Relaxed}, + atomic::{ + AtomicBool, AtomicU64, + Ordering::{AcqRel, Acquire, Relaxed, Release}, + }, Arc, LockResult, RwLock, RwLockReadGuard, RwLockWriteGuard, }, time::{Duration, Instant}, @@ -234,9 +243,8 @@ impl ExecuteTimings { } type BankStatusCache = StatusCache>; -#[frozen_abi(digest = "7bCDimGo11ajw6ZHViBBu8KPfoDZBcwSnumWCU8MMuwr")] +#[frozen_abi(digest = "HvKCvCAizDb2sEGnqVKbSoZT2iCs7iMpqB6ErgU5uzS")] pub type BankSlotDelta = SlotDelta>; -type TransactionAccountRefCells = Vec<(Pubkey, Rc>)>; // Eager rent collection repeats in cyclic manner. // Each cycle is composed of number of tiny pubkey subranges @@ -501,12 +509,91 @@ impl StatusCacheRc { } pub type TransactionCheckResult = (Result<()>, Option); -pub type TransactionExecutionResult = (Result<()>, Option); + pub struct TransactionResults { pub fee_collection_results: Vec>, pub execution_results: Vec, pub rent_debits: Vec, } + +#[derive(Debug, Clone)] +pub struct TransactionExecutionDetails { + pub status: Result<()>, + pub log_messages: Option>, + pub inner_instructions: Option>>, + pub durable_nonce_fee: Option, +} + +/// Type safe representation of a transaction execution attempt which +/// differentiates between a transaction that was executed (will be +/// committed to the ledger) and a transaction which wasn't executed +/// and will be dropped. +/// +/// Note: `Result` is not +/// used because it's easy to forget that the inner `details.status` field +/// is what should be checked to detect a successful transaction. This +/// enum provides a convenience method `Self::was_executed_successfully` to +/// make such checks hard to do incorrectly. 
+#[derive(Debug, Clone)] +pub enum TransactionExecutionResult { + Executed(TransactionExecutionDetails), + NotExecuted(TransactionError), +} + +impl TransactionExecutionResult { + pub fn was_executed_successfully(&self) -> bool { + match self { + Self::Executed(details) => details.status.is_ok(), + Self::NotExecuted { .. } => false, + } + } + + pub fn was_executed(&self) -> bool { + match self { + Self::Executed(_) => true, + Self::NotExecuted(_) => false, + } + } + + pub fn details(&self) -> Option<&TransactionExecutionDetails> { + match self { + Self::Executed(details) => Some(details), + Self::NotExecuted(_) => None, + } + } + + pub fn flattened_result(&self) -> Result<()> { + match self { + Self::Executed(details) => details.status.clone(), + Self::NotExecuted(err) => Err(err.clone()), + } + } +} + +#[derive(Debug, Clone)] +pub enum DurableNonceFee { + Valid(u64), + Invalid, +} + +impl From<&NonceFull> for DurableNonceFee { + fn from(nonce: &NonceFull) -> Self { + match nonce.lamports_per_signature() { + Some(lamports_per_signature) => Self::Valid(lamports_per_signature), + None => Self::Invalid, + } + } +} + +impl DurableNonceFee { + pub fn lamports_per_signature(&self) -> Option { + match self { + Self::Valid(lamports_per_signature) => Some(*lamports_per_signature), + Self::Invalid => None, + } + } +} + pub struct TransactionSimulationResult { pub result: Result<()>, pub logs: TransactionLogMessages, @@ -769,7 +856,7 @@ pub(crate) struct BankFieldsToSerialize<'a> { pub(crate) rent_collector: RentCollector, pub(crate) epoch_schedule: EpochSchedule, pub(crate) inflation: Inflation, - pub(crate) stakes: &'a RwLock, + pub(crate) stakes: &'a StakesCache, pub(crate) epoch_stakes: &'a HashMap, pub(crate) is_delta: bool, } @@ -808,7 +895,7 @@ impl PartialEq for Bank { && self.rent_collector == other.rent_collector && self.epoch_schedule == other.epoch_schedule && *self.inflation.read().unwrap() == *other.inflation.read().unwrap() - && *self.stakes.read().unwrap() == 
*other.stakes.read().unwrap() + && *self.stakes_cache.stakes() == *other.stakes_cache.stakes() && self.epoch_stakes == other.epoch_stakes && self.is_delta.load(Relaxed) == other.is_delta.load(Relaxed) } @@ -988,7 +1075,7 @@ pub struct Bank { inflation: Arc>, /// cache of vote_account and stake_account state for this fork - stakes: RwLock, + stakes_cache: StakesCache, /// staked nodes on epoch boundaries, saved off when a bank.slot() is at /// a leader schedule calculation boundary @@ -1040,6 +1127,10 @@ pub struct Bank { pub cost_tracker: RwLock, sysvar_cache: RwLock)>>, + + /// Current size of the accounts data. Used when processing messages to enforce a limit on its + /// maximum size. + accounts_data_len: AtomicU64, } impl Default for BlockhashQueue { @@ -1054,6 +1145,12 @@ struct VoteWithStakeDelegations { delegations: Vec<(Pubkey, (StakeState, AccountSharedData))>, } +struct LoadVoteAndStakeAccountsResult { + vote_with_stake_delegations_map: DashMap, + invalid_stake_keys: DashMap, + invalid_vote_keys: DashMap, +} + #[derive(Debug, Default)] pub struct NewBankOptions { pub vote_only_bank: bool, @@ -1119,7 +1216,7 @@ impl Bank { } fn default_with_accounts(accounts: Accounts) -> Self { - Self { + let bank = Self { rc: BankRc::new(accounts, Slot::default()), src: StatusCacheRc::default(), blockhash_queue: RwLock::::default(), @@ -1154,7 +1251,7 @@ impl Bank { rent_collector: RentCollector::default(), epoch_schedule: EpochSchedule::default(), inflation: Arc::>::default(), - stakes: RwLock::::default(), + stakes_cache: StakesCache::default(), epoch_stakes: HashMap::::default(), is_delta: AtomicBool::default(), builtin_programs: BuiltinPrograms::default(), @@ -1175,7 +1272,14 @@ impl Bank { vote_only_bank: false, cost_tracker: RwLock::::default(), sysvar_cache: RwLock::new(Vec::new()), - } + accounts_data_len: AtomicU64::default(), + }; + + let total_accounts_stats = bank.get_total_accounts_stats().unwrap(); + bank.accounts_data_len + 
.store(total_accounts_stats.data_len as u64, Release); + + bank } pub fn new_with_paths_for_tests( @@ -1263,7 +1367,7 @@ impl Bank { // genesis needs stakes for all epochs up to the epoch implied by // slot = 0 and genesis configuration { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); for epoch in 0..=bank.get_leader_schedule_epoch(bank.slot) { bank.epoch_stakes .insert(epoch, EpochStakes::new(&stakes, epoch)); @@ -1383,7 +1487,7 @@ impl Bank { transaction_entries_count: AtomicU64::new(0), transactions_per_entry_max: AtomicU64::new(0), // we will .clone_with_epoch() this soon after stake data update; so just .clone() for now - stakes: RwLock::new(parent.stakes.read().unwrap().clone()), + stakes_cache: StakesCache::new(parent.stakes_cache.stakes().clone()), epoch_stakes: parent.epoch_stakes.clone(), parent_hash: parent.hash(), parent_slot: parent.slot(), @@ -1421,6 +1525,7 @@ impl Bank { freeze_started: AtomicBool::new(false), cost_tracker: RwLock::new(CostTracker::default()), sysvar_cache: RwLock::new(Vec::new()), + accounts_data_len: AtomicU64::new(parent.accounts_data_len.load(Acquire)), }; let mut ancestors = Vec::with_capacity(1 + new.parents().len()); @@ -1440,10 +1545,7 @@ impl Bank { // Add new entry to stakes.stake_history, set appropriate epoch and // update vote accounts with warmed up stakes before saving a // snapshot of stakes in epoch stakes - new.stakes - .write() - .unwrap() - .activate_epoch(epoch, &thread_pool); + new.stakes_cache.activate_epoch(epoch, &thread_pool); // Save a snapshot of stakes for use in consensus and stake weighted networking let leader_schedule_epoch = epoch_schedule.get_leader_schedule_epoch(slot); @@ -1485,6 +1587,10 @@ impl Bank { .scan_results_limit_bytes } + pub fn proper_ancestors_set(&self) -> HashSet { + HashSet::from_iter(self.proper_ancestors()) + } + /// Returns all ancestors excluding self.slot. 
pub(crate) fn proper_ancestors(&self) -> impl Iterator + '_ { self.ancestors @@ -1536,6 +1642,7 @@ impl Bank { debug_keys: Option>>, additional_builtins: Option<&Builtins>, debug_do_not_add_builtins: bool, + accounts_data_len: u64, ) -> Self { fn new() -> T { T::default() @@ -1576,7 +1683,7 @@ impl Bank { rent_collector: fields.rent_collector.clone_with_epoch(fields.epoch), epoch_schedule: fields.epoch_schedule, inflation: Arc::new(RwLock::new(fields.inflation)), - stakes: RwLock::new(fields.stakes), + stakes_cache: StakesCache::new(fields.stakes), epoch_stakes: fields.epoch_stakes, is_delta: AtomicBool::new(fields.is_delta), builtin_programs: new(), @@ -1598,6 +1705,7 @@ impl Bank { vote_only_bank: false, cost_tracker: RwLock::new(CostTracker::default()), sysvar_cache: RwLock::new(Vec::new()), + accounts_data_len: AtomicU64::new(accounts_data_len), }; bank.finish_init( genesis_config, @@ -1677,7 +1785,7 @@ impl Bank { rent_collector: self.rent_collector.clone(), epoch_schedule: self.epoch_schedule, inflation: *self.inflation.read().unwrap(), - stakes: &self.stakes, + stakes: &self.stakes_cache, epoch_stakes: &self.epoch_stakes, is_delta: self.is_delta.load(Relaxed), } @@ -1893,6 +2001,18 @@ impl Bank { }); } + pub fn set_sysvar_for_tests(&self, sysvar: &T) + where + T: Sysvar + SysvarId, + { + self.update_sysvar_account(&T::id(), |account| { + create_account( + sysvar, + self.inherit_specially_retained_account_fields(account), + ) + }); + } + fn update_slot_history(&self) { self.update_sysvar_account(&sysvar::slot_history::id(), |account| { let mut slot_history = account @@ -1946,12 +2066,11 @@ impl Bank { }); let new_epoch_stakes = - EpochStakes::new(&self.stakes.read().unwrap(), leader_schedule_epoch); + EpochStakes::new(&self.stakes_cache.stakes(), leader_schedule_epoch); { let vote_stakes: HashMap<_, _> = self - .stakes - .read() - .unwrap() + .stakes_cache + .stakes() .vote_accounts() .iter() .map(|(pubkey, (stake, _))| (*pubkey, *stake)) @@ -2008,7 +2127,7 
@@ impl Bank { // if I'm the first Bank in an epoch, ensure stake_history is updated self.update_sysvar_account(&sysvar::stake_history::id(), |account| { create_account::( - self.stakes.read().unwrap().history(), + self.stakes_cache.stakes().history(), self.inherit_specially_retained_account_fields(account), ) }); @@ -2081,7 +2200,7 @@ impl Bank { let validator_rewards = (validator_rate * capitalization as f64 * epoch_duration_in_years) as u64; - let old_vote_balance_and_staked = self.stakes.read().unwrap().vote_balance_and_staked(); + let old_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); let validator_point_value = self.pay_validator_rewards_with_thread_pool( prev_epoch, @@ -2104,7 +2223,7 @@ impl Bank { }); } - let new_vote_balance_and_staked = self.stakes.read().unwrap().vote_balance_and_staked(); + let new_vote_balance_and_staked = self.stakes_cache.stakes().vote_balance_and_staked(); let validator_rewards_paid = new_vote_balance_and_staked - old_vote_balance_and_staked; assert_eq!( validator_rewards_paid, @@ -2136,7 +2255,7 @@ impl Bank { .fetch_add(validator_rewards_paid, Relaxed); let active_stake = if let Some(stake_history_entry) = - self.stakes.read().unwrap().history().get(prev_epoch) + self.stakes_cache.stakes().history().get(prev_epoch) { stake_history_entry.effective } else { @@ -2166,13 +2285,12 @@ impl Bank { &self, thread_pool: &ThreadPool, reward_calc_tracer: Option, - ) -> DashMap { - let filter_stake_delegation_accounts = self - .feature_set - .is_active(&feature_set::filter_stake_delegation_accounts::id()); - - let stakes = self.stakes.read().unwrap(); - let accounts = DashMap::with_capacity(stakes.vote_accounts().as_ref().len()); + ) -> LoadVoteAndStakeAccountsResult { + let stakes = self.stakes_cache.stakes(); + let vote_with_stake_delegations_map = + DashMap::with_capacity(stakes.vote_accounts().as_ref().len()); + let invalid_stake_keys: DashMap = DashMap::new(); + let invalid_vote_keys: DashMap = 
DashMap::new(); thread_pool.install(|| { stakes @@ -2180,88 +2298,93 @@ impl Bank { .par_iter() .for_each(|(stake_pubkey, delegation)| { let vote_pubkey = &delegation.voter_pubkey; - let stake_account = match self.get_account_with_fixed_root(stake_pubkey) { - Some(stake_account) => stake_account, - None => return, - }; + if invalid_vote_keys.contains_key(vote_pubkey) { + return; + } - // fetch vote account from stakes cache if it hasn't been cached locally - let fetched_vote_account = if !accounts.contains_key(vote_pubkey) { - let vote_account = match self.get_account_with_fixed_root(vote_pubkey) { - Some(vote_account) => vote_account, - None => return, - }; + let stake_delegation = match self.get_account_with_fixed_root(stake_pubkey) { + Some(stake_account) => { + if stake_account.owner() != &solana_stake_program::id() { + invalid_stake_keys + .insert(*stake_pubkey, InvalidCacheEntryReason::WrongOwner); + return; + } - let vote_state: VoteState = - match StateMut::::state(&vote_account) { - Ok(vote_state) => vote_state.convert_to_current(), - Err(err) => { - debug!( - "failed to deserialize vote account {}: {}", - vote_pubkey, err - ); + match stake_account.state().ok() { + Some(stake_state) => (*stake_pubkey, (stake_state, stake_account)), + None => { + invalid_stake_keys + .insert(*stake_pubkey, InvalidCacheEntryReason::BadState); return; } - }; + } + } + None => { + invalid_stake_keys + .insert(*stake_pubkey, InvalidCacheEntryReason::Missing); + return; + } + }; - Some((vote_state, vote_account)) + let mut vote_delegations = if let Some(vote_delegations) = + vote_with_stake_delegations_map.get_mut(vote_pubkey) + { + vote_delegations } else { - None - }; + let vote_account = match self.get_account_with_fixed_root(vote_pubkey) { + Some(vote_account) => { + if vote_account.owner() != &solana_vote_program::id() { + invalid_vote_keys + .insert(*vote_pubkey, InvalidCacheEntryReason::WrongOwner); + return; + } + vote_account + } + None => { + invalid_vote_keys + 
.insert(*vote_pubkey, InvalidCacheEntryReason::Missing); + return; + } + }; - let fetched_vote_account_owner = fetched_vote_account - .as_ref() - .map(|(_vote_state, vote_account)| vote_account.owner()); + let vote_state = if let Ok(vote_state) = + StateMut::::state(&vote_account) + { + vote_state.convert_to_current() + } else { + invalid_vote_keys + .insert(*vote_pubkey, InvalidCacheEntryReason::BadState); + return; + }; + + vote_with_stake_delegations_map + .entry(*vote_pubkey) + .or_insert_with(|| VoteWithStakeDelegations { + vote_state: Arc::new(vote_state), + vote_account, + delegations: vec![], + }) + }; if let Some(reward_calc_tracer) = reward_calc_tracer.as_ref() { reward_calc_tracer(&RewardCalculationEvent::Staking( stake_pubkey, &InflationPointCalculationEvent::Delegation( *delegation, - fetched_vote_account_owner - .cloned() - .unwrap_or_else(solana_vote_program::id), + solana_vote_program::id(), ), )); } - // filter invalid delegation accounts - if filter_stake_delegation_accounts - && (stake_account.owner() != &solana_stake_program::id() - || (fetched_vote_account_owner.is_some() - && fetched_vote_account_owner != Some(&solana_vote_program::id()))) - { - datapoint_warn!( - "bank-stake_delegation_accounts-invalid-account", - ("slot", self.slot() as i64, i64), - ("stake-address", format!("{:?}", stake_pubkey), String), - ("vote-address", format!("{:?}", vote_pubkey), String), - ); - return; - } - - let stake_delegation = match stake_account.state().ok() { - Some(stake_state) => (*stake_pubkey, (stake_state, stake_account)), - None => return, - }; - - if let Some((vote_state, vote_account)) = fetched_vote_account { - accounts - .entry(*vote_pubkey) - .or_insert_with(|| VoteWithStakeDelegations { - vote_state: Arc::new(vote_state), - vote_account, - delegations: vec![], - }); - } - - if let Some(mut stake_delegation_accounts) = accounts.get_mut(vote_pubkey) { - stake_delegation_accounts.delegations.push(stake_delegation); - } + 
vote_delegations.delegations.push(stake_delegation); }); }); - accounts + LoadVoteAndStakeAccountsResult { + vote_with_stake_delegations_map, + invalid_vote_keys, + invalid_stake_keys, + } } /// iterate over all stakes, redeem vote credits for each stake we can @@ -2274,14 +2397,31 @@ impl Bank { fix_activating_credits_observed: bool, thread_pool: &ThreadPool, ) -> f64 { - let stake_history = self.stakes.read().unwrap().history().clone(); - let vote_and_stake_accounts = self.load_vote_and_stake_accounts_with_thread_pool( - thread_pool, - reward_calc_tracer.as_ref(), - ); + let stake_history = self.stakes_cache.stakes().history().clone(); + let vote_with_stake_delegations_map = { + let LoadVoteAndStakeAccountsResult { + vote_with_stake_delegations_map, + invalid_stake_keys, + invalid_vote_keys, + } = self.load_vote_and_stake_accounts_with_thread_pool( + thread_pool, + reward_calc_tracer.as_ref(), + ); + + let evict_invalid_stakes_cache_entries = self + .feature_set + .is_active(&feature_set::evict_invalid_stakes_cache_entries::id()); + self.stakes_cache.handle_invalid_keys( + invalid_stake_keys, + invalid_vote_keys, + evict_invalid_stakes_cache_entries, + self.slot(), + ); + vote_with_stake_delegations_map + }; let points: u128 = thread_pool.install(|| { - vote_and_stake_accounts + vote_with_stake_delegations_map .par_iter() .map(|entry| { let VoteWithStakeDelegations { @@ -2312,8 +2452,8 @@ impl Bank { // pay according to point value let point_value = PointValue { rewards, points }; let vote_account_rewards: DashMap = - DashMap::with_capacity(vote_and_stake_accounts.len()); - let stake_delegation_iterator = vote_and_stake_accounts.into_par_iter().flat_map( + DashMap::with_capacity(vote_with_stake_delegations_map.len()); + let stake_delegation_iterator = vote_with_stake_delegations_map.into_par_iter().flat_map( |( vote_pubkey, VoteWithStakeDelegations { @@ -2701,9 +2841,8 @@ impl Bank { // highest staked node is the first collector self.collector_id = self - .stakes 
- .read() - .unwrap() + .stakes_cache + .stakes() .highest_staked_node() .unwrap_or_default(); @@ -2938,30 +3077,22 @@ impl Bank { .clear_slot_entries(slot); } - pub fn can_commit(result: &Result<()>) -> bool { - match result { - Ok(_) => true, - Err(TransactionError::InstructionError(_, _)) => true, - Err(_) => false, - } - } - fn update_transaction_statuses( &self, sanitized_txs: &[SanitizedTransaction], - res: &[TransactionExecutionResult], + execution_results: &[TransactionExecutionResult], ) { let mut status_cache = self.src.status_cache.write().unwrap(); - assert_eq!(sanitized_txs.len(), res.len()); - for (tx, (res, _nonce)) in sanitized_txs.iter().zip(res) { - if Self::can_commit(res) { + assert_eq!(sanitized_txs.len(), execution_results.len()); + for (tx, execution_result) in sanitized_txs.iter().zip(execution_results) { + if let TransactionExecutionResult::Executed(details) = execution_result { // Add the message hash to the status cache to ensure that this message // won't be processed again with a different signature. status_cache.insert( tx.message().recent_blockhash(), tx.message_hash(), self.slot(), - res.clone(), + details.status.clone(), ); // Add the transaction signature to the status cache so that transaction status // can be queried by transaction signature over RPC. 
In the future, this should @@ -2970,7 +3101,7 @@ impl Bank { tx.message().recent_blockhash(), tx.signature(), self.slot(), - res.clone(), + details.status.clone(), ); } } @@ -3017,7 +3148,7 @@ impl Bank { let lock_results = self .rc .accounts - .lock_accounts(sanitized_txs.iter(), self.demote_program_write_locks()); + .lock_accounts(sanitized_txs.iter(), &FeatureSet::all_enabled()); TransactionBatch::new(lock_results, self, Cow::Owned(sanitized_txs)) } @@ -3036,7 +3167,7 @@ impl Bank { let lock_results = self .rc .accounts - .lock_accounts(sanitized_txs.iter(), self.demote_program_write_locks()); + .lock_accounts(sanitized_txs.iter(), &FeatureSet::all_enabled()); Ok(TransactionBatch::new( lock_results, self, @@ -3052,7 +3183,7 @@ impl Bank { let lock_results = self .rc .accounts - .lock_accounts(txs.iter(), self.demote_program_write_locks()); + .lock_accounts(txs.iter(), &self.feature_set); TransactionBatch::new(lock_results, self, Cow::Borrowed(txs)) } @@ -3067,7 +3198,7 @@ impl Bank { let lock_results = self.rc.accounts.lock_accounts_with_results( transactions.iter(), transaction_results, - self.demote_program_write_locks(), + &self.feature_set, ); TransactionBatch::new(lock_results, self, Cow::Borrowed(transactions)) } @@ -3077,7 +3208,9 @@ impl Bank { &'a self, transaction: SanitizedTransaction, ) -> TransactionBatch<'a, '_> { - let mut batch = TransactionBatch::new(vec![Ok(())], self, Cow::Owned(vec![transaction])); + let lock_result = transaction.get_account_locks(&self.feature_set).map(|_| ()); + let mut batch = + TransactionBatch::new(vec![lock_result], self, Cow::Owned(vec![transaction])); batch.needs_unlock = false; batch } @@ -3089,15 +3222,22 @@ impl Bank { ) -> TransactionSimulationResult { assert!(self.is_frozen(), "simulation bank must be frozen"); + self.simulate_transaction_unchecked(transaction) + } + + /// Run transactions against a bank without committing the results; does not check if the bank + /// is frozen, enabling use in single-Bank test 
frameworks + pub fn simulate_transaction_unchecked( + &self, + transaction: SanitizedTransaction, + ) -> TransactionSimulationResult { let number_of_accounts = transaction.message().account_keys_len(); let batch = self.prepare_simulation_batch(transaction); let mut timings = ExecuteTimings::default(); let ( loaded_transactions, - executed, - _inner_instructions, - logs, + mut execution_results, _retryable_transactions, _transaction_count, _signature_count, @@ -3112,8 +3252,6 @@ impl Bank { &mut timings, ); - let result = executed[0].0.clone().map(|_| ()); - let logs = logs.get(0).cloned().flatten().unwrap_or_default(); let post_simulation_accounts = loaded_transactions .into_iter() .next() @@ -3139,8 +3277,16 @@ impl Bank { debug!("simulate_transaction: {:?}", timings); + let execution_result = execution_results.pop().unwrap(); + let flattened_result = execution_result.flattened_result(); + let logs = match execution_result { + TransactionExecutionResult::Executed(details) => details.log_messages, + TransactionExecutionResult::NotExecuted(_) => None, + } + .unwrap_or_default(); + TransactionSimulationResult { - result, + result: flattened_result, logs, post_simulation_accounts, units_consumed, @@ -3150,11 +3296,9 @@ impl Bank { pub fn unlock_accounts(&self, batch: &mut TransactionBatch) { if batch.needs_unlock { batch.needs_unlock = false; - self.rc.accounts.unlock_accounts( - batch.sanitized_transactions().iter(), - batch.lock_results(), - self.demote_program_write_locks(), - ) + self.rc + .accounts + .unlock_accounts(batch.sanitized_transactions().iter(), batch.lock_results()) } } @@ -3440,6 +3584,110 @@ impl Bank { cache.remove(pubkey); } + /// Execute a transaction using the provided loaded accounts and update + /// the executors cache if the transaction was successful. 
+ fn execute_loaded_transaction( + &self, + tx: &SanitizedTransaction, + loaded_transaction: &mut LoadedTransaction, + compute_budget: ComputeBudget, + durable_nonce_fee: Option, + enable_cpi_recording: bool, + enable_log_recording: bool, + execute_details_timings: &mut ExecuteDetailsTimings, + error_counters: &mut ErrorCounters, + ) -> TransactionExecutionResult { + let legacy_message = match tx.message().legacy_message() { + Some(message) => message, + None => { + // TODO: support versioned messages + return TransactionExecutionResult::NotExecuted( + TransactionError::UnsupportedVersion, + ); + } + }; + + let executors = self.get_executors( + tx.message(), + &loaded_transaction.accounts, + &loaded_transaction.program_indices, + ); + + let account_refcells = Self::accounts_to_refcells(&mut loaded_transaction.accounts); + + let instruction_recorders = if enable_cpi_recording { + let ix_count = tx.message().instructions().len(); + let mut recorders = Vec::with_capacity(ix_count); + recorders.resize_with(ix_count, InstructionRecorder::default); + Some(recorders) + } else { + None + }; + + let log_collector = if enable_log_recording { + Some(LogCollector::new_ref()) + } else { + None + }; + + let (blockhash, lamports_per_signature) = self.last_blockhash_and_lamports_per_signature(); + let process_result = MessageProcessor::process_message( + &self.builtin_programs.vec, + legacy_message, + &loaded_transaction.program_indices, + &account_refcells, + self.rent_collector.rent, + log_collector.clone(), + executors.clone(), + instruction_recorders.as_deref(), + self.feature_set.clone(), + compute_budget, + execute_details_timings, + &*self.sysvar_cache.read().unwrap(), + blockhash, + lamports_per_signature, + ); + + let log_messages: Option = + log_collector.and_then(|log_collector| { + Rc::try_unwrap(log_collector) + .map(|log_collector| log_collector.into_inner().into()) + .ok() + }); + + let inner_instructions: Option = + 
instruction_recorders.and_then(|instruction_recorders| { + instruction_recorders + .into_iter() + .map(|r| r.compile_instructions(tx.message())) + .collect() + }); + + if let Err(e) = + Self::refcells_to_accounts(&mut loaded_transaction.accounts, account_refcells) + { + warn!("Account lifetime mismanagement"); + return TransactionExecutionResult::NotExecuted(e); + } + + let status = process_result + .map(|info| { + self.update_accounts_data_len(info.accounts_data_len_delta); + self.update_executors(executors); + }) + .map_err(|err| { + error_counters.instruction_error += 1; + err + }); + + TransactionExecutionResult::Executed(TransactionExecutionDetails { + status, + log_messages, + inner_instructions, + durable_nonce_fee, + }) + } + #[allow(clippy::type_complexity)] pub fn load_and_execute_transactions( &self, @@ -3451,8 +3699,6 @@ impl Bank { ) -> ( Vec, Vec, - Vec>, - Vec>, Vec, u64, u64, @@ -3501,124 +3747,36 @@ impl Bank { let mut execution_time = Measure::start("execution_time"); let mut signature_count: u64 = 0; - let mut inner_instructions: Vec> = - Vec::with_capacity(sanitized_txs.len()); - let mut transaction_log_messages: Vec>> = - Vec::with_capacity(sanitized_txs.len()); - let executed: Vec = loaded_txs + let execute_details_timings = &mut timings.details; + let execution_results: Vec = loaded_txs .iter_mut() .zip(sanitized_txs.iter()) .map(|(accs, tx)| match accs { - (Err(e), _nonce) => { - transaction_log_messages.push(None); - inner_instructions.push(None); - (Err(e.clone()), None) - } + (Err(e), _nonce) => TransactionExecutionResult::NotExecuted(e.clone()), (Ok(loaded_transaction), nonce) => { let feature_set = self.feature_set.clone(); signature_count += u64::from(tx.message().header().num_required_signatures); let mut compute_budget = self.compute_budget.unwrap_or_else(ComputeBudget::new); - - let mut process_result = if feature_set.is_active(&tx_wide_compute_cap::id()) { - compute_budget.process_transaction(tx, feature_set.clone()) - } else { - 
Ok(()) - }; - - if process_result.is_ok() { - let executors = self.get_executors( - tx.message(), - &loaded_transaction.accounts, - &loaded_transaction.program_indices, - ); - - let account_refcells = - Self::accounts_to_refcells(&mut loaded_transaction.accounts); - - let instruction_recorders = if enable_cpi_recording { - let ix_count = tx.message().instructions().len(); - let mut recorders = Vec::with_capacity(ix_count); - recorders.resize_with(ix_count, InstructionRecorder::default); - Some(recorders) - } else { - None - }; - - let log_collector = if enable_log_recording { - Some(LogCollector::new_ref()) - } else { - None - }; - - let (blockhash, lamports_per_signature) = - self.last_blockhash_and_lamports_per_signature(); - - if let Some(legacy_message) = tx.message().legacy_message() { - process_result = MessageProcessor::process_message( - &self.builtin_programs.vec, - legacy_message, - &loaded_transaction.program_indices, - &account_refcells, - self.rent_collector.rent, - log_collector.clone(), - executors.clone(), - instruction_recorders.as_deref(), - feature_set, - compute_budget, - &mut timings.details, - &*self.sysvar_cache.read().unwrap(), - blockhash, - lamports_per_signature, - ); - } else { - // TODO: support versioned messages - process_result = Err(TransactionError::UnsupportedVersion); + if feature_set.is_active(&tx_wide_compute_cap::id()) { + if let Err(err) = compute_budget.process_transaction(tx, feature_set) { + return TransactionExecutionResult::NotExecuted(err); } - - let log_messages: Option = - log_collector.and_then(|log_collector| { - Rc::try_unwrap(log_collector) - .map(|log_collector| log_collector.into_inner().into()) - .ok() - }); - transaction_log_messages.push(log_messages); - let inner_instruction_list: Option = - instruction_recorders.and_then(|instruction_recorders| { - instruction_recorders - .into_iter() - .map(|r| r.compile_instructions(tx.message())) - .collect() - }); - inner_instructions.push(inner_instruction_list); - - 
if let Err(e) = Self::refcells_to_accounts( - &mut loaded_transaction.accounts, - account_refcells, - ) { - warn!("Account lifetime mismanagement"); - process_result = Err(e); - } - - if process_result.is_ok() { - self.update_executors(executors); - } - } else { - transaction_log_messages.push(None); - inner_instructions.push(None); } - let nonce = match &process_result { - Ok(_) => nonce.clone(), // May need to calculate the fee based on the nonce - Err(TransactionError::InstructionError(_, _)) => { - error_counters.instruction_error += 1; - nonce.clone() // May need to advance the nonce - } - _ => None, - }; - - (process_result, nonce) + let durable_nonce_fee = nonce.as_ref().map(DurableNonceFee::from); + + self.execute_loaded_transaction( + tx, + loaded_transaction, + compute_budget, + durable_nonce_fee, + enable_cpi_recording, + enable_log_recording, + execute_details_timings, + &mut error_counters, + ) } }) .collect(); @@ -3641,17 +3799,18 @@ impl Bank { let transaction_log_collector_config = self.transaction_log_collector_config.read().unwrap(); - for (i, ((r, _nonce), tx)) in executed.iter().zip(sanitized_txs).enumerate() { + for (execution_result, tx) in execution_results.iter().zip(sanitized_txs) { if let Some(debug_keys) = &self.transaction_debug_keys { for key in tx.message().account_keys_iter() { if debug_keys.contains(key) { - info!("slot: {} result: {:?} tx: {:?}", self.slot, r, tx); + let result = execution_result.flattened_result(); + info!("slot: {} result: {:?} tx: {:?}", self.slot, result, tx); break; } } } - if Self::can_commit(r) // Skip log collection for unprocessed transactions + if execution_result.was_executed() // Skip log collection for unprocessed transactions && transaction_log_collector_config.filter != TransactionLogCollectorFilter::None { let mut filtered_mentioned_addresses = Vec::new(); @@ -3682,16 +3841,21 @@ impl Bank { }; if store { - if let Some(log_messages) = transaction_log_messages.get(i).cloned().flatten() { + if let 
TransactionExecutionResult::Executed(TransactionExecutionDetails { + status, + log_messages: Some(log_messages), + .. + }) = execution_result + { let mut transaction_log_collector = self.transaction_log_collector.write().unwrap(); let transaction_log_index = transaction_log_collector.logs.len(); transaction_log_collector.logs.push(TransactionLogInfo { signature: *tx.signature(), - result: r.clone(), + result: status.clone(), is_vote, - log_messages, + log_messages: log_messages.clone(), }); for key in filtered_mentioned_addresses.into_iter() { transaction_log_collector @@ -3704,13 +3868,16 @@ impl Bank { } } - if r.is_ok() { - tx_count += 1; - } else { - if *err_count == 0 { - debug!("tx error: {:?} {:?}", r, tx); + match execution_result.flattened_result() { + Ok(()) => { + tx_count += 1; + } + Err(err) => { + if *err_count == 0 { + debug!("tx error: {:?} {:?}", err, tx); + } + *err_count += 1; } - *err_count += 1; } } if *err_count > 0 { @@ -3723,15 +3890,25 @@ impl Bank { Self::update_error_counters(&error_counters); ( loaded_txs, - executed, - inner_instructions, - transaction_log_messages, + execution_results, retryable_txs, tx_count, signature_count, ) } + /// Update the bank's accounts_data_len field based on the `delta`. 
+ fn update_accounts_data_len(&self, delta: i64) { + if delta == 0 { + return; + } + if delta > 0 { + self.accounts_data_len.fetch_add(delta as u64, AcqRel); + } else { + self.accounts_data_len.fetch_sub(delta.abs() as u64, AcqRel); + } + } + /// Calculate fee for `SanitizedMessage` pub fn calculate_fee(message: &SanitizedMessage, lamports_per_signature: u64) -> u64 { let mut num_signatures = u64::from(message.header().num_required_signatures); @@ -3757,10 +3934,16 @@ impl Bank { let results = txs .iter() .zip(execution_results) - .map(|(tx, (execution_result, nonce))| { - let (lamports_per_signature, is_nonce) = nonce - .as_ref() - .map(|nonce| nonce.lamports_per_signature()) + .map(|(tx, execution_result)| { + let (execution_status, durable_nonce_fee) = match &execution_result { + TransactionExecutionResult::Executed(details) => { + Ok((&details.status, details.durable_nonce_fee.as_ref())) + } + TransactionExecutionResult::NotExecuted(err) => Err(err.clone()), + }?; + + let (lamports_per_signature, is_nonce) = durable_nonce_fee + .map(|durable_nonce_fee| durable_nonce_fee.lamports_per_signature()) .map(|maybe_lamports_per_signature| (maybe_lamports_per_signature, true)) .unwrap_or_else(|| { ( @@ -3773,27 +3956,19 @@ impl Bank { lamports_per_signature.ok_or(TransactionError::BlockhashNotFound)?; let fee = Self::calculate_fee(tx.message(), lamports_per_signature); - match *execution_result { - Err(TransactionError::InstructionError(_, _)) => { - // In case of instruction error, even though no accounts - // were stored we still need to charge the payer the - // fee. 
- // - //...except nonce accounts, which already have their - // post-load, fee deducted, pre-execute account state - // stored - if !is_nonce { - self.withdraw(tx.message().fee_payer(), fee)?; - } - fees += fee; - Ok(()) - } - Ok(()) => { - fees += fee; - Ok(()) - } - _ => execution_result.clone(), + // In case of instruction error, even though no accounts + // were stored we still need to charge the payer the + // fee. + // + //...except nonce accounts, which already have their + // post-load, fee deducted, pre-execute account state + // stored + if execution_status.is_err() && !is_nonce { + self.withdraw(tx.message().fee_payer(), fee)?; } + + fees += fee; + Ok(()) }) .collect(); @@ -3805,7 +3980,7 @@ impl Bank { &self, sanitized_txs: &[SanitizedTransaction], loaded_txs: &mut [TransactionLoadResult], - executed_results: &[TransactionExecutionResult], + execution_results: Vec, tx_count: u64, signature_count: u64, timings: &mut ExecuteTimings, @@ -3831,10 +4006,7 @@ impl Bank { .fetch_max(processed_tx_count, Relaxed); } - if executed_results - .iter() - .any(|(res, _)| Self::can_commit(res)) - { + if execution_results.iter().any(|result| result.was_executed()) { self.is_delta.store(true, Relaxed); } @@ -3843,19 +4015,18 @@ impl Bank { self.rc.accounts.store_cached( self.slot(), sanitized_txs, - executed_results, + &execution_results, loaded_txs, &self.rent_collector, &blockhash, lamports_per_signature, self.rent_for_sysvars(), - self.demote_program_write_locks(), self.leave_nonce_on_success(), ); - let rent_debits = self.collect_rent(executed_results, loaded_txs); + let rent_debits = self.collect_rent(&execution_results, loaded_txs); let mut update_stakes_cache_time = Measure::start("update_stakes_cache_time"); - self.update_stakes_cache(sanitized_txs, executed_results, loaded_txs); + self.update_stakes_cache(sanitized_txs, &execution_results, loaded_txs); update_stakes_cache_time.stop(); // once committed there is no way to unroll @@ -3869,13 +4040,13 @@ impl Bank 
{ timings.update_stakes_cache_us = timings .update_stakes_cache_us .saturating_add(update_stakes_cache_time.as_us()); - self.update_transaction_statuses(sanitized_txs, executed_results); + self.update_transaction_statuses(sanitized_txs, &execution_results); let fee_collection_results = - self.filter_program_errors_and_collect_fee(sanitized_txs, executed_results); + self.filter_program_errors_and_collect_fee(sanitized_txs, &execution_results); TransactionResults { fee_collection_results, - execution_results: executed_results.to_vec(), + execution_results, rent_debits, } } @@ -4039,24 +4210,24 @@ impl Bank { fn collect_rent( &self, - res: &[TransactionExecutionResult], + execution_results: &[TransactionExecutionResult], loaded_txs: &mut [TransactionLoadResult], ) -> Vec { let mut collected_rent: u64 = 0; - let mut rent_debits: Vec = Vec::with_capacity(loaded_txs.len()); - for (i, (raccs, _nonce)) in loaded_txs.iter_mut().enumerate() { - let (res, _nonce) = &res[i]; - if res.is_err() || raccs.is_err() { - rent_debits.push(RentDebits::default()); - continue; - } - - let loaded_transaction = raccs.as_mut().unwrap(); - - collected_rent += loaded_transaction.rent; - rent_debits.push(mem::take(&mut loaded_transaction.rent_debits)); - } - + let rent_debits: Vec<_> = loaded_txs + .iter_mut() + .zip(execution_results) + .map(|((load_result, _nonce), execution_result)| { + if let (Ok(loaded_transaction), true) = + (load_result, execution_result.was_executed_successfully()) + { + collected_rent += loaded_transaction.rent; + mem::take(&mut loaded_transaction.rent_debits) + } else { + RentDebits::default() + } + }) + .collect(); self.collected_rent.fetch_add(collected_rent, Relaxed); rent_debits } @@ -4548,38 +4719,26 @@ impl Bank { enable_cpi_recording: bool, enable_log_recording: bool, timings: &mut ExecuteTimings, - ) -> ( - TransactionResults, - TransactionBalancesSet, - Vec>, - Vec>, - ) { + ) -> (TransactionResults, TransactionBalancesSet) { let pre_balances = if 
collect_balances { self.collect_balances(batch) } else { vec![] }; - let ( - mut loaded_txs, - executed, - inner_instructions, - transaction_logs, - _, - tx_count, - signature_count, - ) = self.load_and_execute_transactions( - batch, - max_age, - enable_cpi_recording, - enable_log_recording, - timings, - ); + let (mut loaded_txs, execution_results, _, tx_count, signature_count) = self + .load_and_execute_transactions( + batch, + max_age, + enable_cpi_recording, + enable_log_recording, + timings, + ); let results = self.commit_transactions( batch.sanitized_transactions(), &mut loaded_txs, - &executed, + execution_results, tx_count, signature_count, timings, @@ -4592,8 +4751,6 @@ impl Bank { ( results, TransactionBalancesSet::new(pre_balances, post_balances), - inner_instructions, - transaction_logs, ) } @@ -4709,13 +4866,11 @@ impl Bank { .accounts .store_slow_cached(self.slot(), pubkey, account); - if Stakes::is_stake(account) { - self.stakes.write().unwrap().store( - pubkey, - account, - self.stakes_remove_delegation_if_inactive_enabled(), - ); - } + self.stakes_cache.check_and_store( + pubkey, + account, + self.stakes_remove_delegation_if_inactive_enabled(), + ); } pub fn force_flush_accounts_cache(&self) { @@ -5442,45 +5597,42 @@ impl Bank { fn update_stakes_cache( &self, txs: &[SanitizedTransaction], - res: &[TransactionExecutionResult], + execution_results: &[TransactionExecutionResult], loaded_txs: &[TransactionLoadResult], ) { - for (i, ((raccs, _load_nonce), tx)) in loaded_txs.iter().zip(txs).enumerate() { - let (res, _res_nonce) = &res[i]; - if res.is_err() || raccs.is_err() { - continue; - } - - let message = tx.message(); - let loaded_transaction = raccs.as_ref().unwrap(); - - for (_i, (pubkey, account)) in (0..message.account_keys_len()) - .zip(loaded_transaction.accounts.iter()) - .filter(|(_i, (_pubkey, account))| (Stakes::is_stake(account))) - { - self.stakes.write().unwrap().store( - pubkey, - account, - 
self.stakes_remove_delegation_if_inactive_enabled(), - ); + for (i, ((load_result, _load_nonce), tx)) in loaded_txs.iter().zip(txs).enumerate() { + if let (Ok(loaded_transaction), true) = ( + load_result, + execution_results[i].was_executed_successfully(), + ) { + let message = tx.message(); + for (_i, (pubkey, account)) in + (0..message.account_keys_len()).zip(loaded_transaction.accounts.iter()) + { + self.stakes_cache.check_and_store( + pubkey, + account, + self.stakes_remove_delegation_if_inactive_enabled(), + ); + } } } } pub fn staked_nodes(&self) -> Arc> { - self.stakes.read().unwrap().staked_nodes() + self.stakes_cache.stakes().staked_nodes() } /// current vote accounts for this bank along with the stake /// attributed to each account pub fn vote_accounts(&self) -> Arc> { - let stakes = self.stakes.read().unwrap(); + let stakes = self.stakes_cache.stakes(); Arc::from(stakes.vote_accounts()) } /// Vote account for the given vote account pubkey along with the stake. pub fn get_vote_account(&self, vote_account: &Pubkey) -> Option<(/*stake:*/ u64, VoteAccount)> { - let stakes = self.stakes.read().unwrap(); + let stakes = self.stakes_cache.stakes(); stakes.vote_accounts().get(vote_account).cloned() } @@ -5716,11 +5868,6 @@ impl Bank { .is_active(&feature_set::stake_program_advance_activating_credits_observed::id()) } - pub fn demote_program_write_locks(&self) -> bool { - self.feature_set - .is_active(&feature_set::demote_program_write_locks::id()) - } - pub fn leave_nonce_on_success(&self) -> bool { self.feature_set .is_active(&feature_set::leave_nonce_on_success::id()) @@ -6204,6 +6351,7 @@ pub(crate) mod tests { system_program, sysvar::rewards::Rewards, timing::duration_as_s, + transaction::MAX_TX_ACCOUNT_LOCKS, }, solana_vote_program::{ vote_instruction, @@ -6217,7 +6365,7 @@ pub(crate) mod tests { impl Bank { fn cloned_stake_delegations(&self) -> StakeDelegations { - self.stakes.read().unwrap().stake_delegations().clone() + 
self.stakes_cache.stakes().stake_delegations().clone() } } @@ -6228,6 +6376,18 @@ pub(crate) mod tests { Message::new(instructions, payer).try_into().unwrap() } + fn new_execution_result( + status: Result<()>, + nonce: Option<&NonceFull>, + ) -> TransactionExecutionResult { + TransactionExecutionResult::Executed(TransactionExecutionDetails { + status, + log_messages: None, + inner_instructions: None, + durable_nonce_fee: nonce.map(DurableNonceFee::from), + }) + } + #[test] fn test_nonce_info() { let lamports_per_signature = 42; @@ -7221,10 +7381,7 @@ pub(crate) mod tests { assert_eq!( bank.process_transaction(&tx), - Err(TransactionError::InstructionError( - 0, - InstructionError::ExecutableLamportChange - )) + Err(TransactionError::InvalidWritableAccount) ); assert_eq!(bank.get_balance(&account_pubkey), account_balance); } @@ -8284,6 +8441,7 @@ pub(crate) mod tests { let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); let validator_points: u128 = bank0 .load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()) + .vote_with_stake_delegations_map .into_iter() .map( |( @@ -8969,8 +9127,8 @@ pub(crate) mod tests { )); let results = vec![ - (Ok(()), None), - ( + new_execution_result(Ok(()), None), + new_execution_result( Err(TransactionError::InstructionError( 1, SystemError::ResultWithNegativeLamports.into(), @@ -10503,10 +10661,10 @@ pub(crate) mod tests { // Non-builtin loader accounts can not be used for instruction processing { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(stakes.vote_accounts().as_ref().is_empty()); } - assert!(bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); let ((vote_id, vote_account), (stake_id, stake_account)) = @@ -10516,19 +10674,19 @@ pub(crate) mod tests { bank.store_account(&vote_id, 
&vote_account); bank.store_account(&stake_id, &stake_account); { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(!stakes.vote_accounts().as_ref().is_empty()); } - assert!(!bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(!bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); bank.add_builtin("mock_program1", &vote_id, mock_ix_processor); bank.add_builtin("mock_program2", &stake_id, mock_ix_processor); { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(stakes.vote_accounts().as_ref().is_empty()); } - assert!(bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", @@ -10548,10 +10706,10 @@ pub(crate) mod tests { let new_hash = bank.get_accounts_hash(); assert_eq!(old_hash, new_hash); { - let stakes = bank.stakes.read().unwrap(); + let stakes = bank.stakes_cache.stakes(); assert!(stakes.vote_accounts().as_ref().is_empty()); } - assert!(bank.stakes.read().unwrap().stake_delegations().is_empty()); + assert!(bank.stakes_cache.stakes().stake_delegations().is_empty()); assert_eq!(bank.calculate_capitalization(true), bank.capitalization()); assert_eq!( "mock_program1", @@ -11338,8 +11496,8 @@ pub(crate) mod tests { let txs = vec![tx0, tx1, tx2]; let lock_result = bank0.prepare_batch_for_tests(txs); - let (transaction_results, transaction_balances_set, inner_instructions, transaction_logs) = - bank0.load_execute_and_commit_transactions( + let (transaction_results, transaction_balances_set) = bank0 + .load_execute_and_commit_transactions( &lock_result, MAX_PROCESSING_AGE, true, @@ -11348,27 +11506,34 @@ pub(crate) mod tests { &mut ExecuteTimings::default(), ); - 
assert!(inner_instructions.iter().all(Option::is_none)); - assert!(transaction_logs.iter().all(Option::is_none)); - - assert_eq!(inner_instructions.len(), 3); - assert_eq!(transaction_logs.len(), 3); assert_eq!(transaction_balances_set.pre_balances.len(), 3); assert_eq!(transaction_balances_set.post_balances.len(), 3); - assert!(transaction_results.execution_results[0].0.is_ok()); + assert!(transaction_results.execution_results[0].was_executed_successfully()); assert_eq!(transaction_balances_set.pre_balances[0], vec![8, 11, 1]); assert_eq!(transaction_balances_set.post_balances[0], vec![5, 13, 1]); // Failed transactions still produce balance sets // This is a TransactionError - not possible to charge fees - assert!(transaction_results.execution_results[1].0.is_err()); + assert!(matches!( + transaction_results.execution_results[1], + TransactionExecutionResult::NotExecuted(TransactionError::AccountNotFound), + )); assert_eq!(transaction_balances_set.pre_balances[1], vec![0, 0, 1]); assert_eq!(transaction_balances_set.post_balances[1], vec![0, 0, 1]); // Failed transactions still produce balance sets // This is an InstructionError - fees charged - assert!(transaction_results.execution_results[2].0.is_err()); + assert!(matches!( + transaction_results.execution_results[2], + TransactionExecutionResult::Executed(TransactionExecutionDetails { + status: Err(TransactionError::InstructionError( + 0, + InstructionError::Custom(1), + )), + .. 
+ }), + )); assert_eq!(transaction_balances_set.pre_balances[2], vec![9, 0, 1]); assert_eq!(transaction_balances_set.post_balances[2], vec![8, 0, 1]); } @@ -11572,6 +11737,43 @@ pub(crate) mod tests { assert_eq!(result, Err(TransactionError::AccountLoadedTwice)); } + #[test] + fn test_process_transaction_with_too_many_account_locks() { + solana_logger::setup(); + let (genesis_config, mint_keypair) = create_genesis_config(500); + let mut bank = Bank::new_for_tests(&genesis_config); + + let from_pubkey = solana_sdk::pubkey::new_rand(); + let to_pubkey = solana_sdk::pubkey::new_rand(); + + let account_metas = vec![ + AccountMeta::new(from_pubkey, false), + AccountMeta::new(to_pubkey, false), + ]; + + bank.add_builtin( + "mock_vote", + &solana_vote_program::id(), + mock_ok_vote_processor, + ); + + let instruction = + Instruction::new_with_bincode(solana_vote_program::id(), &10, account_metas); + let mut tx = Transaction::new_signed_with_payer( + &[instruction], + Some(&mint_keypair.pubkey()), + &[&mint_keypair], + bank.last_blockhash(), + ); + + while tx.message.account_keys.len() <= MAX_TX_ACCOUNT_LOCKS { + tx.message.account_keys.push(solana_sdk::pubkey::new_rand()); + } + + let result = bank.process_transaction(&tx); + assert_eq!(result, Err(TransactionError::TooManyAccountLocks)); + } + #[test] fn test_program_id_as_payer() { solana_logger::setup(); @@ -14278,19 +14480,16 @@ pub(crate) mod tests { let validator_vote_keypairs0 = ValidatorVoteKeypairs::new_rand(); let validator_vote_keypairs1 = ValidatorVoteKeypairs::new_rand(); let validator_keypairs = vec![&validator_vote_keypairs0, &validator_vote_keypairs1]; - let GenesisConfigInfo { - genesis_config, - mint_keypair: _, - voting_keypair: _, - } = create_genesis_config_with_vote_accounts( + let GenesisConfigInfo { genesis_config, .. 
} = create_genesis_config_with_vote_accounts( 1_000_000_000, &validator_keypairs, vec![10_000; 2], ); let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let vote_and_stake_accounts = - bank.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()); + let vote_and_stake_accounts = bank + .load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()) + .vote_with_stake_delegations_map; assert_eq!(vote_and_stake_accounts.len(), 2); let mut vote_account = bank @@ -14330,8 +14529,9 @@ pub(crate) mod tests { // Accounts must be valid stake and vote accounts let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - let vote_and_stake_accounts = - bank.load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()); + let vote_and_stake_accounts = bank + .load_vote_and_stake_accounts_with_thread_pool(&thread_pool, null_tracer()) + .vote_with_stake_delegations_map; assert_eq!(vote_and_stake_accounts.len(), 0); } @@ -14438,7 +14638,7 @@ pub(crate) mod tests { let txs = vec![tx0, tx1, tx2]; let batch = bank.prepare_batch_for_tests(txs); - let log_results = bank + let execution_results = bank .load_execute_and_commit_transactions( &batch, MAX_PROCESSING_AGE, @@ -14447,11 +14647,28 @@ pub(crate) mod tests { true, &mut ExecuteTimings::default(), ) - .3; - assert_eq!(log_results.len(), 3); - assert!(log_results[0].as_ref().unwrap()[1].contains(&"success".to_string())); - assert!(log_results[1].as_ref().unwrap()[2].contains(&"failed".to_string())); - assert!(log_results[2].as_ref().is_none()); + .0 + .execution_results; + + assert_eq!(execution_results.len(), 3); + + assert!(execution_results[0].details().is_some()); + assert!(execution_results[0] + .details() + .unwrap() + .log_messages + .as_ref() + .unwrap()[1] + .contains(&"success".to_string())); + assert!(execution_results[1].details().is_some()); + assert!(execution_results[1] + 
.details() + .unwrap() + .log_messages + .as_ref() + .unwrap()[2] + .contains(&"failed".to_string())); + assert!(!execution_results[2].was_executed()); let stored_logs = &bank.transaction_log_collector.read().unwrap().logs; let success_log_info = stored_logs @@ -14946,44 +15163,6 @@ pub(crate) mod tests { } } - #[test] - fn test_verify_transactions_load_duplicate_account() { - let GenesisConfigInfo { genesis_config, .. } = - create_genesis_config_with_leader(42, &solana_sdk::pubkey::new_rand(), 42); - let bank = Bank::new_for_tests(&genesis_config); - - let mut rng = rand::thread_rng(); - let recent_blockhash = hash::new_rand(&mut rng); - let from_keypair = Keypair::new(); - let to_keypair = Keypair::new(); - let from_pubkey = from_keypair.pubkey(); - let to_pubkey = to_keypair.pubkey(); - - let make_transaction = || { - let mut message = Message::new( - &[system_instruction::transfer(&from_pubkey, &to_pubkey, 1)], - Some(&from_pubkey), - ); - let to_index = message - .account_keys - .iter() - .position(|k| k == &to_pubkey) - .unwrap(); - message.account_keys[to_index] = from_pubkey; - Transaction::new(&[&from_keypair], message, recent_blockhash) - }; - - // Duplicate account - { - let tx = make_transaction(); - assert_eq!( - bank.verify_transaction(tx.into(), TransactionVerificationMode::FullVerification) - .err(), - Some(TransactionError::AccountLoadedTwice), - ); - } - } - #[test] fn test_verify_transactions_packet_data_size() { let GenesisConfigInfo { genesis_config, .. } = diff --git a/runtime/src/bank_forks.rs b/runtime/src/bank_forks.rs index a0b119b6307d76..a34b1777281ef5 100644 --- a/runtime/src/bank_forks.rs +++ b/runtime/src/bank_forks.rs @@ -601,8 +601,8 @@ mod tests { let leader_keypair = Keypair::new(); let GenesisConfigInfo { mut genesis_config, - mint_keypair: _, voting_keypair, + .. 
} = create_genesis_config_with_leader(10_000, &leader_keypair.pubkey(), 1_000); let slots_in_epoch = 32; genesis_config.epoch_schedule = EpochSchedule::new(slots_in_epoch); diff --git a/runtime/src/bank_utils.rs b/runtime/src/bank_utils.rs index c1021030dd9c6f..771903ba8718ac 100644 --- a/runtime/src/bank_utils.rs +++ b/runtime/src/bank_utils.rs @@ -43,8 +43,8 @@ pub fn find_and_send_votes( sanitized_txs .iter() .zip(execution_results.iter()) - .for_each(|(tx, (result, _nonce))| { - if tx.is_simple_vote_transaction() && result.is_ok() { + .for_each(|(tx, result)| { + if tx.is_simple_vote_transaction() && result.was_executed_successfully() { if let Some(parsed_vote) = vote_transaction::parse_sanitized_vote_transaction(tx) { diff --git a/runtime/src/block_cost_limits.rs b/runtime/src/block_cost_limits.rs index 97be8a062d9c2f..6eead90cf2ab26 100644 --- a/runtime/src/block_cost_limits.rs +++ b/runtime/src/block_cost_limits.rs @@ -11,22 +11,22 @@ use { /// Static configurations: /// /// Number of microseconds replaying a block should take, 400 millisecond block times -/// is curerntly publicly communicated on solana.com +/// is currently publicly communicated on solana.com pub const MAX_BLOCK_REPLAY_TIME_US: u64 = 400_000; /// number of concurrent processes, -pub const MAX_CONCURRENCY: u64 = 10; +pub const MAX_CONCURRENCY: u64 = 4; /// Cluster data, method of collecting at https://github.com/solana-labs/solana/issues/19627 /// Dashboard: https://metrics.solana.com:8889/sources/0/dashboards/10?refresh=Paused&lower=now%28%29%20-%2012h /// -/// cluster avergaed compute unit to microsec conversion rate -pub const COMPUTE_UNIT_TO_US_RATIO: u64 = 40; +/// cluster averaged compute unit to micro-sec conversion rate +pub const COMPUTE_UNIT_TO_US_RATIO: u64 = 30; /// Number of compute units for one signature verification. 
-pub const SIGNATURE_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 130; +pub const SIGNATURE_COST: u64 = COMPUTE_UNIT_TO_US_RATIO * 24; /// Number of compute units for one write lock pub const WRITE_LOCK_UNITS: u64 = COMPUTE_UNIT_TO_US_RATIO * 10; /// Number of data bytes per compute units -pub const DATA_BYTES_UNITS: u64 = 220 /*bytes per us*/ / COMPUTE_UNIT_TO_US_RATIO; +pub const DATA_BYTES_UNITS: u64 = 550 /*bytes per us*/ / COMPUTE_UNIT_TO_US_RATIO; // Number of compute units for each built-in programs lazy_static! { /// Number of compute units for each built-in programs @@ -37,9 +37,10 @@ lazy_static! { (solana_sdk::stake::config::id(), COMPUTE_UNIT_TO_US_RATIO * 2), (solana_sdk::stake::program::id(), COMPUTE_UNIT_TO_US_RATIO * 25), (solana_config_program::id(), COMPUTE_UNIT_TO_US_RATIO * 15), - (solana_vote_program::id(), COMPUTE_UNIT_TO_US_RATIO * 85), - (secp256k1_program::id(), COMPUTE_UNIT_TO_US_RATIO * 4), - (system_program::id(), COMPUTE_UNIT_TO_US_RATIO * 10), + (solana_vote_program::id(), COMPUTE_UNIT_TO_US_RATIO * 70), + // secp256k1 is executed in banking stage, it should cost similar to sigverify + (secp256k1_program::id(), COMPUTE_UNIT_TO_US_RATIO * 24), + (system_program::id(), COMPUTE_UNIT_TO_US_RATIO * 5), ] .iter() .cloned() @@ -49,12 +50,15 @@ lazy_static! { /// Statically computed data: /// /// Number of compute units that a block is allowed. A block's compute units are -/// accumualted by Transactions added to it; A transaction's compute units are -/// calculated by cost_model, based on transaction's signarures, write locks, -/// data size and built-in and BPF instructinos. +/// accumulated by Transactions added to it; A transaction's compute units are +/// calculated by cost_model, based on transaction's signatures, write locks, +/// data size and built-in and BPF instructions. 
pub const MAX_BLOCK_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_UNIT_TO_US_RATIO * MAX_CONCURRENCY; /// Number of compute units that a writable account in a block is allowed. The -/// limit is to prevent too many transactions write to same account, threrefore -/// reduce block's paralellism. +/// limit is to prevent too many transactions write to same account, therefore +/// reduce block's parallelism. pub const MAX_WRITABLE_ACCOUNT_UNITS: u64 = MAX_BLOCK_REPLAY_TIME_US * COMPUTE_UNIT_TO_US_RATIO; + +/// max length of account data in a slot (bytes) +pub const MAX_ACCOUNT_DATA_LEN: u64 = 100_000_000; diff --git a/runtime/src/builtins.rs b/runtime/src/builtins.rs index 3299820cf9c93c..87eb17660346db 100644 --- a/runtime/src/builtins.rs +++ b/runtime/src/builtins.rs @@ -172,6 +172,15 @@ fn feature_builtins() -> Vec<(Builtin, Pubkey, ActivationType)> { feature_set::prevent_calling_precompiles_as_programs::id(), ActivationType::RemoveProgram, ), + ( + Builtin::new( + "address_lookup_table_program", + solana_address_lookup_table_program::id(), + solana_address_lookup_table_program::processor::process_instruction, + ), + feature_set::versioned_tx_message_enabled::id(), + ActivationType::NewProgram, + ), ] } diff --git a/runtime/src/cost_model.rs b/runtime/src/cost_model.rs index a69b4c9f41ec09..75d5af43daf10a 100644 --- a/runtime/src/cost_model.rs +++ b/runtime/src/cost_model.rs @@ -106,15 +106,11 @@ impl CostModel { ); } - pub fn calculate_cost( - &self, - transaction: &SanitizedTransaction, - demote_program_write_locks: bool, - ) -> TransactionCost { + pub fn calculate_cost(&self, transaction: &SanitizedTransaction) -> TransactionCost { let mut tx_cost = TransactionCost::new_with_capacity(MAX_WRITABLE_ACCOUNTS); tx_cost.signature_cost = self.get_signature_cost(transaction); - self.get_write_lock_cost(&mut tx_cost, transaction, demote_program_write_locks); + self.get_write_lock_cost(&mut tx_cost, transaction); tx_cost.data_bytes_cost = 
self.get_data_bytes_cost(transaction); tx_cost.execution_cost = self.get_transaction_cost(transaction); tx_cost.cost_weight = self.calculate_cost_weight(transaction); @@ -140,6 +136,20 @@ impl CostModel { self.instruction_execution_cost_table.get_cost_table() } + pub fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 { + match self.instruction_execution_cost_table.get_cost(program_key) { + Some(cost) => *cost, + None => { + let default_value = self.instruction_execution_cost_table.get_mode(); + debug!( + "Program key {:?} does not have assigned cost, using mode {}", + program_key, default_value + ); + default_value + } + } + } + fn get_signature_cost(&self, transaction: &SanitizedTransaction) -> u64 { transaction.signatures().len() as u64 * SIGNATURE_COST } @@ -148,11 +158,10 @@ impl CostModel { &self, tx_cost: &mut TransactionCost, transaction: &SanitizedTransaction, - demote_program_write_locks: bool, ) { let message = transaction.message(); message.account_keys_iter().enumerate().for_each(|(i, k)| { - let is_writable = message.is_writable(i, demote_program_write_locks); + let is_writable = message.is_writable(i); if is_writable { tx_cost.writable_accounts.push(*k); @@ -187,20 +196,6 @@ impl CostModel { cost } - fn find_instruction_cost(&self, program_key: &Pubkey) -> u64 { - match self.instruction_execution_cost_table.get_cost(program_key) { - Some(cost) => *cost, - None => { - let default_value = self.instruction_execution_cost_table.get_mode(); - debug!( - "Program key {:?} does not have assigned cost, using mode {}", - program_key, default_value - ); - default_value - } - } - } - fn calculate_cost_weight(&self, transaction: &SanitizedTransaction) -> u32 { if is_simple_vote_transaction(transaction) { // vote has zero cost weight, so it bypasses block cost limit checking @@ -381,7 +376,7 @@ mod tests { ); let cost_model = CostModel::default(); - let tx_cost = cost_model.calculate_cost(&tx, /*demote_program_write_locks=*/ true); + let tx_cost = 
cost_model.calculate_cost(&tx); assert_eq!(2 + 2, tx_cost.writable_accounts.len()); assert_eq!(signer1.pubkey(), tx_cost.writable_accounts[0]); assert_eq!(signer2.pubkey(), tx_cost.writable_accounts[1]); @@ -425,7 +420,7 @@ mod tests { cost_model .upsert_instruction_cost(&system_program::id(), expected_execution_cost) .unwrap(); - let tx_cost = cost_model.calculate_cost(&tx, /*demote_program_write_locks=*/ true); + let tx_cost = cost_model.calculate_cost(&tx); assert_eq!(expected_account_cost, tx_cost.write_lock_cost); assert_eq!(expected_execution_cost, tx_cost.execution_cost); assert_eq!(2, tx_cost.writable_accounts.len()); @@ -494,8 +489,7 @@ mod tests { } else { thread::spawn(move || { let cost_model = cost_model.write().unwrap(); - let tx_cost = cost_model - .calculate_cost(&tx, /*demote_program_write_locks=*/ true); + let tx_cost = cost_model.calculate_cost(&tx); assert_eq!(3, tx_cost.writable_accounts.len()); assert_eq!(expected_account_cost, tx_cost.write_lock_cost); }) diff --git a/runtime/src/genesis_utils.rs b/runtime/src/genesis_utils.rs index 71222fdf441b31..7db7a4f8a07cbd 100644 --- a/runtime/src/genesis_utils.rs +++ b/runtime/src/genesis_utils.rs @@ -52,6 +52,7 @@ pub struct GenesisConfigInfo { pub genesis_config: GenesisConfig, pub mint_keypair: Keypair, pub voting_keypair: Keypair, + pub validator_pubkey: Pubkey, } pub fn create_genesis_config(mint_lamports: u64) -> GenesisConfigInfo { @@ -84,10 +85,11 @@ pub fn create_genesis_config_with_vote_accounts_and_cluster_type( let voting_keypair = Keypair::from_bytes(&voting_keypairs[0].borrow().vote_keypair.to_bytes()).unwrap(); + let validator_pubkey = voting_keypairs[0].borrow().node_keypair.pubkey(); let genesis_config = create_genesis_config_with_leader_ex( mint_lamports, &mint_keypair.pubkey(), - &voting_keypairs[0].borrow().node_keypair.pubkey(), + &validator_pubkey, &voting_keypairs[0].borrow().vote_keypair.pubkey(), &voting_keypairs[0].borrow().stake_keypair.pubkey(), stakes[0], @@ -102,6 +104,7 
@@ pub fn create_genesis_config_with_vote_accounts_and_cluster_type( genesis_config, mint_keypair, voting_keypair, + validator_pubkey, }; for (validator_voting_keypairs, stake) in voting_keypairs[1..].iter().zip(&stakes[1..]) { @@ -159,6 +162,7 @@ pub fn create_genesis_config_with_leader( genesis_config, mint_keypair, voting_keypair, + validator_pubkey: *validator_pubkey, } } diff --git a/runtime/src/message_processor.rs b/runtime/src/message_processor.rs index 5d0a7e7211a72e..28fb25351acbd2 100644 --- a/runtime/src/message_processor.rs +++ b/runtime/src/message_processor.rs @@ -3,12 +3,15 @@ use { solana_measure::measure::Measure, solana_program_runtime::{ instruction_recorder::InstructionRecorder, - invoke_context::{BuiltinProgram, Executors, InvokeContext}, + invoke_context::{ + BuiltinProgram, Executors, InvokeContext, ProcessInstructionResult, + TransactionAccountRefCell, + }, log_collector::LogCollector, timings::ExecuteDetailsTimings, }, solana_sdk::{ - account::{AccountSharedData, WritableAccount}, + account::WritableAccount, compute_budget::ComputeBudget, feature_set::{prevent_calling_precompiles_as_programs, FeatureSet}, hash::Hash, @@ -34,6 +37,13 @@ impl ::solana_frozen_abi::abi_example::AbiExample for MessageProcessor { } } +/// Resultant information gathered from calling process_message() +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub struct ProcessedMessageInfo { + /// The amount that the accounts data len has changed + pub accounts_data_len_delta: i64, +} + impl MessageProcessor { /// Process a message. /// This method calls each instruction in the message over the set of loaded accounts. 
@@ -45,7 +55,7 @@ impl MessageProcessor { builtin_programs: &[BuiltinProgram], message: &Message, program_indices: &[Vec], - accounts: &[(Pubkey, Rc>)], + accounts: &[TransactionAccountRefCell], rent: Rent, log_collector: Option>>, executors: Rc>, @@ -56,7 +66,7 @@ impl MessageProcessor { sysvars: &[(Pubkey, Vec)], blockhash: Hash, lamports_per_signature: u64, - ) -> Result<(), TransactionError> { + ) -> Result { let mut invoke_context = InvokeContext::new( rent, accounts, @@ -89,9 +99,9 @@ impl MessageProcessor { // Fixup the special instructions key if present // before the account pre-values are taken care of - for (pubkey, accont) in accounts.iter().take(message.account_keys.len()) { + for (pubkey, account) in accounts.iter().take(message.account_keys.len()) { if instructions::check_id(pubkey) { - let mut mut_account_ref = accont.borrow_mut(); + let mut mut_account_ref = account.borrow_mut(); instructions::store_current_index( mut_account_ref.data_as_mut_slice(), instruction_index as u16, @@ -104,33 +114,23 @@ impl MessageProcessor { invoke_context.instruction_recorder = Some(&instruction_recorders[instruction_index]); } - let result = invoke_context - .push(message, instruction, program_indices, None) - .and_then(|_| { - let pre_remaining_units = - invoke_context.get_compute_meter().borrow().get_remaining(); - let mut time = Measure::start("execute_instruction"); - - invoke_context.process_instruction(&instruction.data)?; - invoke_context.verify(message, instruction, program_indices)?; - - time.stop(); - let post_remaining_units = - invoke_context.get_compute_meter().borrow().get_remaining(); - timings.accumulate_program( - instruction.program_id(&message.account_keys), - time.as_us(), - pre_remaining_units - post_remaining_units, - ); - timings.accumulate(&invoke_context.timings); - Ok(()) - }) - .map_err(|err| TransactionError::InstructionError(instruction_index as u8, err)); - invoke_context.pop(); - - result?; + let mut time = 
Measure::start("execute_instruction"); + let ProcessInstructionResult { + compute_units_consumed, + result, + } = invoke_context.process_instruction(message, instruction, program_indices, &[], &[]); + time.stop(); + timings.accumulate_program( + instruction.program_id(&message.account_keys), + time.as_us(), + compute_units_consumed, + result.is_err(), + ); + timings.accumulate(&invoke_context.timings); + result + .map_err(|err| TransactionError::InstructionError(instruction_index as u8, err))?; } - Ok(()) + Ok(ProcessedMessageInfo::default()) } } @@ -140,7 +140,7 @@ mod tests { super::*, crate::rent_collector::RentCollector, solana_sdk::{ - account::ReadableAccount, + account::{AccountSharedData, ReadableAccount}, instruction::{AccountMeta, Instruction, InstructionError}, keyed_account::keyed_account_at_index, message::Message, @@ -256,7 +256,7 @@ mod tests { Hash::default(), 0, ); - assert_eq!(result, Ok(())); + assert!(result.is_ok()); assert_eq!(accounts[0].1.borrow().lamports(), 100); assert_eq!(accounts[1].1.borrow().lamports(), 0); @@ -495,7 +495,7 @@ mod tests { Hash::default(), 0, ); - assert_eq!(result, Ok(())); + assert!(result.is_ok()); // Do work on the same account but at different location in keyed_accounts[] let message = Message::new( @@ -525,7 +525,7 @@ mod tests { Hash::default(), 0, ); - assert_eq!(result, Ok(())); + assert!(result.is_ok()); assert_eq!(accounts[0].1.borrow().lamports(), 80); assert_eq!(accounts[1].1.borrow().lamports(), 20); assert_eq!(accounts[0].1.borrow().data(), &vec![42]); diff --git a/runtime/src/serde_snapshot.rs b/runtime/src/serde_snapshot.rs index 600d8b823a2d79..adf59e00a68488 100644 --- a/runtime/src/serde_snapshot.rs +++ b/runtime/src/serde_snapshot.rs @@ -5,7 +5,7 @@ use { accounts::Accounts, accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, AppendVecId, - BankHashInfo, + BankHashInfo, IndexGenerationInfo, }, accounts_index::AccountSecondaryIndexes, 
accounts_update_notifier_interface::AccountsUpdateNotifier, @@ -334,7 +334,7 @@ fn reconstruct_bank_from_fields( where E: SerializableStorage + std::marker::Sync, { - let accounts_db = reconstruct_accountsdb_from_fields( + let (accounts_db, reconstructed_accounts_db_info) = reconstruct_accountsdb_from_fields( snapshot_accounts_db_fields, account_paths, unpacked_append_vec_map, @@ -359,6 +359,7 @@ where debug_keys, additional_builtins, debug_do_not_add_builtins, + reconstructed_accounts_db_info.accounts_data_len, ); info!("rent_collector: {:?}", bank.rent_collector()); @@ -386,6 +387,12 @@ where Ok(()) } +/// This struct contains side-info while reconstructing the accounts DB from fields. +#[derive(Debug, Default, Copy, Clone)] +struct ReconstructedAccountsDbInfo { + accounts_data_len: u64, +} + #[allow(clippy::too_many_arguments)] fn reconstruct_accountsdb_from_fields( snapshot_accounts_db_fields: SnapshotAccountsDbFields, @@ -399,7 +406,7 @@ fn reconstruct_accountsdb_from_fields( verify_index: bool, accounts_db_config: Option, accounts_update_notifier: Option, -) -> Result +) -> Result<(AccountsDb, ReconstructedAccountsDbInfo), Error> where E: SerializableStorage + std::marker::Sync, { @@ -536,11 +543,12 @@ where }) .unwrap(); - accounts_db.generate_index( + let IndexGenerationInfo { accounts_data_len } = accounts_db.generate_index( limit_load_slot_count_from_snapshot, verify_index, genesis_config, ); + accounts_db.maybe_add_filler_accounts(&genesis_config.epoch_schedule); handle.join().unwrap(); @@ -557,5 +565,8 @@ where ("accountsdb-notify-at-start-us", measure_notify.as_us(), i64), ); - Ok(Arc::try_unwrap(accounts_db).unwrap()) + Ok(( + Arc::try_unwrap(accounts_db).unwrap(), + ReconstructedAccountsDbInfo { accounts_data_len }, + )) } diff --git a/runtime/src/serde_snapshot/future.rs b/runtime/src/serde_snapshot/future.rs index 8fd4a9a455ff04..bc279e355148b3 100644 --- a/runtime/src/serde_snapshot/future.rs +++ b/runtime/src/serde_snapshot/future.rs @@ -2,9 +2,9 
@@ use solana_frozen_abi::abi_example::IgnoreAsHelper; use { super::{common::UnusedAccounts, *}, - crate::ancestors::AncestorsForSerialization, + crate::{ancestors::AncestorsForSerialization, stakes::StakesCache}, solana_measure::measure::Measure, - std::cell::RefCell, + std::{cell::RefCell, sync::RwLock}, }; type AccountsDbFields = super::AccountsDbFields; @@ -42,7 +42,6 @@ impl From<&AccountStorageEntry> for SerializableAccountStorageEntry { } } -use std::sync::RwLock; // Deserializable version of Bank which need not be serializable, // because it's handled by SerializableVersionedBank. // So, sync fields with it! @@ -153,7 +152,7 @@ pub(crate) struct SerializableVersionedBank<'a> { pub(crate) rent_collector: RentCollector, pub(crate) epoch_schedule: EpochSchedule, pub(crate) inflation: Inflation, - pub(crate) stakes: &'a RwLock, + pub(crate) stakes: &'a StakesCache, pub(crate) unused_accounts: UnusedAccounts, pub(crate) epoch_stakes: &'a HashMap, pub(crate) is_delta: bool, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 5de81e824280c1..c4975827bdff63 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -89,6 +89,7 @@ where Some(crate::accounts_db::ACCOUNTS_DB_CONFIG_FOR_TESTING), None, ) + .map(|(accounts_db, _)| accounts_db) } #[cfg(test)] @@ -312,7 +313,7 @@ mod test_bank_serialize { // This some what long test harness is required to freeze the ABI of // Bank's serialization due to versioned nature - #[frozen_abi(digest = "Fv5AFJSnZi9sssiE7Jn8bH2iTPnqu3UNc3np62r1sTsr")] + #[frozen_abi(digest = "EuYcD3JCEWRnQaFHW1CAy2bBqLkakc88iLJtZH6kYeVF")] #[derive(Serialize, AbiExample)] pub struct BankAbiTestWrapperFuture { #[serde(serialize_with = "wrapper_future")] diff --git a/runtime/src/snapshot_utils.rs b/runtime/src/snapshot_utils.rs index d33ca432842f66..84f2aefe4c5eae 100644 --- a/runtime/src/snapshot_utils.rs +++ b/runtime/src/snapshot_utils.rs @@ -48,6 +48,7 @@ pub const 
SNAPSHOT_STATUS_CACHE_FILE_NAME: &str = "status_cache"; pub const DEFAULT_FULL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = 100_000; pub const DEFAULT_INCREMENTAL_SNAPSHOT_ARCHIVE_INTERVAL_SLOTS: Slot = 100; const MAX_SNAPSHOT_DATA_FILE_SIZE: u64 = 32 * 1024 * 1024 * 1024; // 32 GiB +const MAX_SNAPSHOT_VERSION_FILE_SIZE: u64 = 8; // byte const VERSION_STRING_V1_2_0: &str = "1.2.0"; const DEFAULT_SNAPSHOT_VERSION: SnapshotVersion = SnapshotVersion::V1_2_0; pub(crate) const TMP_BANK_SNAPSHOT_PREFIX: &str = "tmp-bank-snapshot-"; @@ -964,12 +965,7 @@ where info!("{}", measure_untar); let unpacked_version_file = unpack_dir.path().join("version"); - let snapshot_version = { - let mut snapshot_version = String::new(); - File::open(unpacked_version_file) - .and_then(|mut f| f.read_to_string(&mut snapshot_version))?; - snapshot_version.trim().to_string() - }; + let snapshot_version = snapshot_version_from_file(&unpacked_version_file)?; Ok(UnarchivedSnapshot { unpack_dir, @@ -982,6 +978,28 @@ where }) } +/// Reads the `snapshot_version` from a file. Before opening the file, its size +/// is compared to `MAX_SNAPSHOT_VERSION_FILE_SIZE`. If the size exceeds this +/// threshold, it is not opened and an error is returned. +fn snapshot_version_from_file(path: impl AsRef) -> Result { + // Check file size. + let file_size = fs::metadata(&path)?.len(); + if file_size > MAX_SNAPSHOT_VERSION_FILE_SIZE { + let error_message = format!( + "snapshot version file too large: {} has {} bytes (max size is {} bytes)", + path.as_ref().display(), + file_size, + MAX_SNAPSHOT_VERSION_FILE_SIZE, + ); + return Err(get_io_error(&error_message)); + } + + // Read snapshot_version from file. + let mut snapshot_version = String::new(); + File::open(path).and_then(|mut f| f.read_to_string(&mut snapshot_version))?; + Ok(snapshot_version.trim().to_string()) +} + /// Check if an incremental snapshot is compatible with a full snapshot. 
This is done by checking /// if the incremental snapshot's base slot is the same as the full snapshot's slot. fn check_are_snapshots_compatible( @@ -1863,7 +1881,8 @@ mod tests { system_transaction, transaction::SanitizedTransaction, }, - std::mem::size_of, + std::{convert::TryFrom, mem::size_of}, + tempfile::NamedTempFile, }; #[test] @@ -1998,6 +2017,27 @@ mod tests { assert_matches!(result, Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("invalid snapshot data file")); } + #[test] + fn test_snapshot_version_from_file_under_limit() { + let file_content = format!("v{}", DEFAULT_SNAPSHOT_VERSION); + let mut file = NamedTempFile::new().unwrap(); + file.write_all(file_content.as_bytes()).unwrap(); + let version_from_file = snapshot_version_from_file(file.path()).unwrap(); + assert_eq!(version_from_file, file_content); + } + + #[test] + fn test_snapshot_version_from_file_over_limit() { + let over_limit_size = usize::try_from(MAX_SNAPSHOT_VERSION_FILE_SIZE + 1).unwrap(); + let file_content = vec![7u8; over_limit_size]; + let mut file = NamedTempFile::new().unwrap(); + file.write_all(&file_content).unwrap(); + assert_matches!( + snapshot_version_from_file(file.path()), + Err(SnapshotError::Io(ref message)) if message.to_string().starts_with("snapshot version file too large") + ); + } + #[test] fn test_parse_full_snapshot_archive_filename() { assert_eq!( diff --git a/runtime/src/stake_delegations.rs b/runtime/src/stake_delegations.rs index aedd0272055a6a..5dc69b4b7b0920 100644 --- a/runtime/src/stake_delegations.rs +++ b/runtime/src/stake_delegations.rs @@ -35,6 +35,7 @@ type StakeDelegationsInner = HashMap; mod tests { use super::*; + /// Ensure that StakeDelegations is indeed clone-on-write #[test] fn test_stake_delegations_is_cow() { let voter_pubkey = Pubkey::new_unique(); @@ -83,4 +84,45 @@ mod tests { ); } } + + /// Ensure that StakeDelegations serializes and deserializes between the inner and outer types + #[test] + fn 
test_stake_delegations_serde() { + let voter_pubkey = Pubkey::new_unique(); + let stake = rand::random(); + let activation_epoch = rand::random(); + let warmup_cooldown_rate = rand::random(); + let delegation = + Delegation::new(&voter_pubkey, stake, activation_epoch, warmup_cooldown_rate); + + let pubkey = Pubkey::new_unique(); + + let mut stake_delegations_outer = StakeDelegations::default(); + stake_delegations_outer.insert(pubkey, delegation); + + let mut stake_delegations_inner = StakeDelegationsInner::default(); + stake_delegations_inner.insert(pubkey, delegation); + + // Test: Assert that serializing the outer and inner types produces the same data + assert_eq!( + bincode::serialize(&stake_delegations_outer).unwrap(), + bincode::serialize(&stake_delegations_inner).unwrap(), + ); + + // Test: Assert that serializing the outer type then deserializing to the inner type + // produces the same values + { + let data = bincode::serialize(&stake_delegations_outer).unwrap(); + let deserialized_inner: StakeDelegationsInner = bincode::deserialize(&data).unwrap(); + assert_eq!(&deserialized_inner, stake_delegations_outer.deref()); + } + + // Test: Assert that serializing the inner type then deserializing to the outer type + // produces the same values + { + let data = bincode::serialize(&stake_delegations_inner).unwrap(); + let deserialized_outer: StakeDelegations = bincode::deserialize(&data).unwrap(); + assert_eq!(deserialized_outer.deref(), &stake_delegations_inner); + } + } } diff --git a/runtime/src/stake_history.rs b/runtime/src/stake_history.rs index fbcf7ab9b55e72..27c7674008daf5 100644 --- a/runtime/src/stake_history.rs +++ b/runtime/src/stake_history.rs @@ -38,6 +38,7 @@ mod tests { } } + /// Ensure that StakeHistory is indeed clone-on-write #[test] fn test_stake_history_is_cow() { let mut stake_history = StakeHistory::default(); @@ -81,4 +82,38 @@ mod tests { ); } } + + /// Ensure that StakeHistory serializes and deserializes between the inner and outer types 
+ #[test] + fn test_stake_history_serde() { + let mut stake_history_outer = StakeHistory::default(); + let mut stake_history_inner = StakeHistoryInner::default(); + (2134..).take(11).for_each(|epoch| { + let entry = rand_stake_history_entry(); + stake_history_outer.add(epoch, entry.clone()); + stake_history_inner.add(epoch, entry); + }); + + // Test: Assert that serializing the outer and inner types produces the same data + assert_eq!( + bincode::serialize(&stake_history_outer).unwrap(), + bincode::serialize(&stake_history_inner).unwrap(), + ); + + // Test: Assert that serializing the outer type then deserializing to the inner type + // produces the same values + { + let data = bincode::serialize(&stake_history_outer).unwrap(); + let deserialized_inner: StakeHistoryInner = bincode::deserialize(&data).unwrap(); + assert_eq!(&deserialized_inner, stake_history_outer.deref()); + } + + // Test: Assert that serializing the inner type then deserializing to the outer type + // produces the same values + { + let data = bincode::serialize(&stake_history_inner).unwrap(); + let deserialized_outer: StakeHistory = bincode::deserialize(&data).unwrap(); + assert_eq!(deserialized_outer.deref(), &stake_history_inner); + } + } } diff --git a/runtime/src/stakes.rs b/runtime/src/stakes.rs index c58f0c2b93f965..970889e3d18516 100644 --- a/runtime/src/stakes.rs +++ b/runtime/src/stakes.rs @@ -6,13 +6,16 @@ use { stake_history::StakeHistory, vote_account::{VoteAccount, VoteAccounts, VoteAccountsHashMap}, }, + dashmap::DashMap, + num_derive::ToPrimitive, + num_traits::ToPrimitive, rayon::{ iter::{IntoParallelRefIterator, ParallelIterator}, ThreadPool, }, solana_sdk::{ account::{AccountSharedData, ReadableAccount}, - clock::Epoch, + clock::{Epoch, Slot}, pubkey::Pubkey, stake::{ self, @@ -21,9 +24,139 @@ use { }, solana_stake_program::stake_state, solana_vote_program::vote_state::VoteState, - std::{collections::HashMap, sync::Arc}, + std::{ + collections::HashMap, + sync::{Arc, RwLock, 
RwLockReadGuard}, + }, }; +#[derive(Debug, Clone, PartialEq, ToPrimitive)] +pub enum InvalidCacheEntryReason { + Missing, + BadState, + WrongOwner, +} + +#[derive(Default, Debug, Deserialize, Serialize, AbiExample)] +pub struct StakesCache(RwLock); + +impl StakesCache { + pub fn new(stakes: Stakes) -> Self { + Self(RwLock::new(stakes)) + } + + pub fn stakes(&self) -> RwLockReadGuard { + self.0.read().unwrap() + } + + pub fn is_stake(account: &AccountSharedData) -> bool { + solana_vote_program::check_id(account.owner()) + || stake::program::check_id(account.owner()) + && account.data().len() >= std::mem::size_of::() + } + + pub fn check_and_store( + &self, + pubkey: &Pubkey, + account: &AccountSharedData, + remove_delegation_on_inactive: bool, + ) { + if solana_vote_program::check_id(account.owner()) { + let new_vote_account = if account.lamports() != 0 + && VoteState::is_correct_size_and_initialized(account.data()) + { + let vote_account = VoteAccount::from(account.clone()); + { + // Called to eagerly deserialize vote state + let _res = vote_account.vote_state(); + } + Some(vote_account) + } else { + None + }; + + self.0 + .write() + .unwrap() + .update_vote_account(pubkey, new_vote_account); + } else if solana_stake_program::check_id(account.owner()) { + let new_delegation = stake_state::delegation_from(account).map(|delegation| { + let stakes = self.stakes(); + let stake = if account.lamports() != 0 { + delegation.stake(stakes.epoch, Some(&stakes.stake_history)) + } else { + // when account is removed (lamports == 0), this special `else` clause ensures + // resetting cached stake value below, even if the account happens to be + // still staked for some (odd) reason + 0 + }; + (stake, delegation) + }); + + let remove_delegation = if remove_delegation_on_inactive { + new_delegation.is_none() + } else { + account.lamports() == 0 + }; + + self.0.write().unwrap().update_stake_delegation( + pubkey, + new_delegation, + remove_delegation, + ); + } + } + + pub fn 
activate_epoch(&self, next_epoch: Epoch, thread_pool: &ThreadPool) { + let mut stakes = self.0.write().unwrap(); + stakes.activate_epoch(next_epoch, thread_pool) + } + + pub fn handle_invalid_keys( + &self, + invalid_stake_keys: DashMap, + invalid_vote_keys: DashMap, + should_evict_invalid_entries: bool, + current_slot: Slot, + ) { + if invalid_stake_keys.is_empty() && invalid_vote_keys.is_empty() { + return; + } + + // Prune invalid stake delegations and vote accounts that were + // not properly evicted in normal operation. + let mut maybe_stakes = if should_evict_invalid_entries { + Some(self.0.write().unwrap()) + } else { + None + }; + + for (stake_pubkey, reason) in invalid_stake_keys { + if let Some(stakes) = maybe_stakes.as_mut() { + stakes.remove_stake_delegation(&stake_pubkey); + } + datapoint_warn!( + "bank-stake_delegation_accounts-invalid-account", + ("slot", current_slot as i64, i64), + ("stake-address", format!("{:?}", stake_pubkey), String), + ("reason", reason.to_i64().unwrap_or_default(), i64), + ); + } + + for (vote_pubkey, reason) in invalid_vote_keys { + if let Some(stakes) = maybe_stakes.as_mut() { + stakes.remove_vote_account(&vote_pubkey); + } + datapoint_warn!( + "bank-stake_delegation_accounts-invalid-account", + ("slot", current_slot as i64, i64), + ("vote-address", format!("{:?}", vote_pubkey), String), + ("reason", reason.to_i64().unwrap_or_default(), i64), + ); + } + } +} + #[derive(Default, Clone, PartialEq, Debug, Deserialize, Serialize, AbiExample)] pub struct Stakes { /// vote accounts @@ -144,89 +277,72 @@ impl Stakes { + self.vote_accounts.iter().map(get_lamports).sum::() } - pub fn is_stake(account: &AccountSharedData) -> bool { - solana_vote_program::check_id(account.owner()) - || stake::program::check_id(account.owner()) - && account.data().len() >= std::mem::size_of::() + pub fn remove_vote_account(&mut self, vote_pubkey: &Pubkey) { + self.vote_accounts.remove(vote_pubkey); } - pub fn store( + pub fn 
remove_stake_delegation(&mut self, stake_pubkey: &Pubkey) { + if let Some(removed_delegation) = self.stake_delegations.remove(stake_pubkey) { + let removed_stake = removed_delegation.stake(self.epoch, Some(&self.stake_history)); + self.vote_accounts + .sub_stake(&removed_delegation.voter_pubkey, removed_stake); + } + } + + pub fn update_vote_account( &mut self, - pubkey: &Pubkey, - account: &AccountSharedData, - remove_delegation_on_inactive: bool, + vote_pubkey: &Pubkey, + new_vote_account: Option, ) { - if solana_vote_program::check_id(account.owner()) { - // unconditionally remove existing at first; there is no dependent calculated state for - // votes, not like stakes (stake codepath maintains calculated stake value grouped by - // delegated vote pubkey) - let old = self.vote_accounts.remove(pubkey); - // when account is removed (lamports == 0 or data uninitialized), don't read so that - // given `pubkey` can be used for any owner in the future, while not affecting Stakes. - if account.lamports() != 0 && VoteState::is_correct_size_and_initialized(account.data()) - { - let stake = old.as_ref().map_or_else( - || self.calculate_stake(pubkey, self.epoch, &self.stake_history), - |v| v.0, - ); - - self.vote_accounts - .insert(*pubkey, (stake, VoteAccount::from(account.clone()))); - } - } else if stake::program::check_id(account.owner()) { - // old_stake is stake lamports and voter_pubkey from the pre-store() version - let old_stake = self.stake_delegations.get(pubkey).map(|delegation| { - ( - delegation.voter_pubkey, - delegation.stake(self.epoch, Some(&self.stake_history)), - ) - }); + // unconditionally remove existing at first; there is no dependent calculated state for + // votes, not like stakes (stake codepath maintains calculated stake value grouped by + // delegated vote pubkey) + let old_entry = self.vote_accounts.remove(vote_pubkey); + if let Some(new_vote_account) = new_vote_account { + debug_assert!(new_vote_account.is_deserialized()); + let new_stake = 
old_entry.as_ref().map_or_else( + || self.calculate_stake(vote_pubkey, self.epoch, &self.stake_history), + |(old_stake, _old_vote_account)| *old_stake, + ); - let delegation = stake_state::delegation_from(account); - - let stake = delegation.map(|delegation| { - ( - delegation.voter_pubkey, - if account.lamports() != 0 { - delegation.stake(self.epoch, Some(&self.stake_history)) - } else { - // when account is removed (lamports == 0), this special `else` clause ensures - // resetting cached stake value below, even if the account happens to be - // still staked for some (odd) reason - 0 - }, - ) - }); + self.vote_accounts + .insert(*vote_pubkey, (new_stake, new_vote_account)); + } + } - // if adjustments need to be made... - if stake != old_stake { - if let Some((voter_pubkey, stake)) = old_stake { - self.vote_accounts.sub_stake(&voter_pubkey, stake); - } - if let Some((voter_pubkey, stake)) = stake { - self.vote_accounts.add_stake(&voter_pubkey, stake); - } - } + pub fn update_stake_delegation( + &mut self, + stake_pubkey: &Pubkey, + new_delegation: Option<(u64, Delegation)>, + remove_delegation: bool, + ) { + // old_stake is stake lamports and voter_pubkey from the pre-store() version + let old_stake = self.stake_delegations.get(stake_pubkey).map(|delegation| { + ( + delegation.voter_pubkey, + delegation.stake(self.epoch, Some(&self.stake_history)), + ) + }); - let remove_delegation = if remove_delegation_on_inactive { - delegation.is_none() - } else { - account.lamports() == 0 - }; + let new_stake = new_delegation.map(|(stake, delegation)| (delegation.voter_pubkey, stake)); - if remove_delegation { - // when account is removed (lamports == 0), remove it from Stakes as well - // so that given `pubkey` can be used for any owner in the future, while not - // affecting Stakes. - self.stake_delegations.remove(pubkey); - } else if let Some(delegation) = delegation { - self.stake_delegations.insert(*pubkey, delegation); + // check if adjustments need to be made... 
+ if new_stake != old_stake { + if let Some((voter_pubkey, stake)) = old_stake { + self.vote_accounts.sub_stake(&voter_pubkey, stake); } - } else { - // there is no need to remove possibly existing Stakes cache entries with given - // `pubkey` because this isn't possible, first of all. - // Runtime always enforces an intermediary write of account.lamports == 0, - // when not-System111-owned account.owner is swapped. + if let Some((voter_pubkey, stake)) = new_stake { + self.vote_accounts.add_stake(&voter_pubkey, stake); + } + } + + if remove_delegation { + // when account is removed (lamports == 0), remove it from Stakes as well + // so that given `pubkey` can be used for any owner in the future, while not + // affecting Stakes. + self.stake_delegations.remove(stake_pubkey); + } else if let Some((_stake, delegation)) = new_delegation { + self.stake_delegations.insert(*stake_pubkey, delegation); } } @@ -326,18 +442,19 @@ pub mod tests { #[test] fn test_stakes_basic() { for i in 0..4 { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: i, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, mut stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -347,8 +464,9 @@ pub mod tests { } stake_account.set_lamports(42); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); 
assert_eq!( @@ -359,9 +477,10 @@ pub mod tests { // activate more let (_stake_pubkey, mut stake_account) = create_stake_account(42, &vote_pubkey); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -371,8 +490,9 @@ pub mod tests { } stake_account.set_lamports(0); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0); @@ -382,58 +502,62 @@ pub mod tests { #[test] fn test_stakes_highest() { - let mut stakes = Stakes::default(); + let stakes_cache = StakesCache::default(); - assert_eq!(stakes.highest_staked_node(), None); + assert_eq!(stakes_cache.stakes().highest_staked_node(), None); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let ((vote11_pubkey, vote11_account), (stake11_pubkey, stake11_account)) = create_staked_node_accounts(20); - stakes.store(&vote11_pubkey, &vote11_account, true); - stakes.store(&stake11_pubkey, &stake11_account, true); + stakes_cache.check_and_store(&vote11_pubkey, &vote11_account, true); + stakes_cache.check_and_store(&stake11_pubkey, &stake11_account, true); let vote11_node_pubkey = VoteState::from(&vote11_account).unwrap().node_pubkey; - assert_eq!(stakes.highest_staked_node(), Some(vote11_node_pubkey)) + let 
highest_staked_node = stakes_cache.stakes().highest_staked_node(); + assert_eq!(highest_staked_node, Some(vote11_node_pubkey)); } #[test] fn test_stakes_vote_account_disappear_reappear() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, mut vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); } vote_account.set_lamports(0); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_none()); } vote_account.set_lamports(1); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); @@ -444,9 +568,10 @@ pub mod tests { let mut pushed = vote_account.data().to_vec(); pushed.push(0); vote_account.set_data(pushed); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_none()); } @@ -455,17 +580,19 @@ pub mod tests { let default_vote_state = VoteState::default(); let versioned = 
VoteStateVersions::new_current(default_vote_state); VoteState::to(&versioned, &mut vote_account).unwrap(); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_none()); } vote_account.set_data(cache_data); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); @@ -474,10 +601,10 @@ pub mod tests { #[test] fn test_stakes_change_delegate() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); @@ -485,15 +612,16 @@ pub mod tests { let ((vote_pubkey2, vote_account2), (_stake_pubkey2, stake_account2)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&vote_pubkey2, &vote_account2, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey2, &vote_account2, true); // delegates to vote_pubkey - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!( @@ -505,9 +633,10 @@ pub mod tests { } // delegates to vote_pubkey2 - stakes.store(&stake_pubkey, &stake_account2, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account2, true); { + let stakes = stakes_cache.stakes(); let 
vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0); @@ -520,23 +649,24 @@ pub mod tests { } #[test] fn test_stakes_multiple_stakers() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); let (stake_pubkey2, stake_account2) = create_stake_account(10, &vote_pubkey); - stakes.store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); // delegates to vote_pubkey - stakes.store(&stake_pubkey, &stake_account, true); - stakes.store(&stake_pubkey2, &stake_account2, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&stake_pubkey2, &stake_account2, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 20); @@ -545,16 +675,17 @@ pub mod tests { #[test] fn test_activate_epoch() { - let mut stakes = Stakes::default(); + let stakes_cache = StakesCache::default(); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); let stake = stake_state::stake_from(&stake_account).unwrap(); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert_eq!( vote_accounts.get(&vote_pubkey).unwrap().0, @@ -562,8 +693,9 @@ pub mod tests { ); } let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); - stakes.activate_epoch(3, &thread_pool); + 
stakes_cache.activate_epoch(3, &thread_pool); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert_eq!( vote_accounts.get(&vote_pubkey).unwrap().0, @@ -574,30 +706,32 @@ pub mod tests { #[test] fn test_stakes_not_delegate() { - let mut stakes = Stakes { + let stakes_cache = StakesCache::new(Stakes { epoch: 4, ..Stakes::default() - }; + }); let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_staked_node_accounts(10); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 10); } // not a stake account, and whacks above entry - stakes.store( + stakes_cache.check_and_store( &stake_pubkey, &AccountSharedData::new(1, 0, &stake::program::id()), true, ); { + let stakes = stakes_cache.stakes(); let vote_accounts = stakes.vote_accounts(); assert!(vote_accounts.get(&vote_pubkey).is_some()); assert_eq!(vote_accounts.get(&vote_pubkey).unwrap().0, 0); @@ -612,7 +746,7 @@ pub mod tests { #[test] fn test_vote_balance_and_staked_normal() { - let mut stakes = Stakes::default(); + let stakes_cache = StakesCache::default(); impl Stakes { pub fn vote_balance_and_warmed_staked(&self) -> u64 { self.vote_accounts @@ -625,17 +759,21 @@ pub mod tests { let genesis_epoch = 0; let ((vote_pubkey, vote_account), (stake_pubkey, stake_account)) = create_warming_staked_node_accounts(10, genesis_epoch); - stakes.store(&vote_pubkey, &vote_account, true); - stakes.store(&stake_pubkey, &stake_account, true); + stakes_cache.check_and_store(&vote_pubkey, &vote_account, true); + stakes_cache.check_and_store(&stake_pubkey, &stake_account, true); - 
assert_eq!(stakes.vote_balance_and_staked(), 11); - assert_eq!(stakes.vote_balance_and_warmed_staked(), 1); + { + let stakes = stakes_cache.stakes(); + assert_eq!(stakes.vote_balance_and_staked(), 11); + assert_eq!(stakes.vote_balance_and_warmed_staked(), 1); + } let thread_pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); for (epoch, expected_warmed_stake) in ((genesis_epoch + 1)..=3).zip(&[2, 3, 4]) { - stakes.activate_epoch(epoch, &thread_pool); + stakes_cache.activate_epoch(epoch, &thread_pool); // vote_balance_and_staked() always remain to return same lamports // while vote_balance_and_warmed_staked() gradually increases + let stakes = stakes_cache.stakes(); assert_eq!(stakes.vote_balance_and_staked(), 11); assert_eq!( stakes.vote_balance_and_warmed_staked(), diff --git a/runtime/src/vote_account.rs b/runtime/src/vote_account.rs index 453e54258f8ca3..daaae5af9abbf6 100644 --- a/runtime/src/vote_account.rs +++ b/runtime/src/vote_account.rs @@ -69,6 +69,10 @@ impl VoteAccount { inner.vote_state.read().unwrap() } + pub fn is_deserialized(&self) -> bool { + self.0.vote_state_once.is_completed() + } + /// VoteState.node_pubkey of this vote-account. 
fn node_pubkey(&self) -> Option { Some(self.vote_state().as_ref().ok()?.node_pubkey) diff --git a/runtime/store-tool/Cargo.toml b/runtime/store-tool/Cargo.toml index 44ae18ea6c08a1..aef1255828dfac 100644 --- a/runtime/store-tool/Cargo.toml +++ b/runtime/store-tool/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-store-tool" description = "Tool to inspect append vecs" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,9 +11,9 @@ publish = false [dependencies] log = { version = "0.4.14" } -solana-logger = { path = "../../logger", version = "=1.9.0" } -solana-version = { path = "../../version", version = "=1.9.0" } -solana-runtime = { path = "..", version = "=1.9.0" } +solana-logger = { path = "../../logger", version = "=1.9.4" } +solana-version = { path = "../../version", version = "=1.9.4" } +solana-runtime = { path = "..", version = "=1.9.4" } clap = "2.33.1" [package.metadata.docs.rs] diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index b1aa5b075710e1..39f3d5e37a35fa 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -145,8 +145,12 @@ if [[ -d target/perf-libs ]]; then cp -a target/perf-libs "$installDir"/bin/perf-libs fi -mkdir -p "$installDir"/bin/sdk/bpf -cp -a sdk/bpf/* "$installDir"/bin/sdk/bpf +if [[ -z "$validatorOnly" ]]; then + # shellcheck disable=SC2086 # Don't want to double quote $rust_version + "$cargo" $maybeRustVersion build --manifest-path programs/bpf_loader/gen-syscall-list/Cargo.toml + mkdir -p "$installDir"/bin/sdk/bpf + cp -a sdk/bpf/* "$installDir"/bin/sdk/bpf +fi ( set -x diff --git a/scripts/solana-install-update-manifest-keypair.sh b/scripts/solana-install-update-manifest-keypair.sh index 94159bcf48a211..c0af8c1bd32147 100755 --- a/scripts/solana-install-update-manifest-keypair.sh +++ b/scripts/solana-install-update-manifest-keypair.sh @@ 
-9,7 +9,7 @@ OS=${1:-linux} case "$OS" in osx) - TARGET=x86_64-apple-darwin + TARGET=$(uname -m)-apple-darwin ;; linux) TARGET=x86_64-unknown-linux-gnu diff --git a/sdk/.gitignore b/sdk/.gitignore index 5404b132dba6e1..14bd5d17098f2f 100644 --- a/sdk/.gitignore +++ b/sdk/.gitignore @@ -1,2 +1,4 @@ -/target/ /farf/ +/node_modules/ +/package-lock.json +/target/ diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 8ce62e797182d0..cd9d21d7500b33 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-sdk" -version = "1.9.0" +version = "1.9.4" description = "Solana SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -39,6 +39,7 @@ full = [ [dependencies] assert_matches = { version = "1.5.0", optional = true } bincode = "1.3.3" +bitflags = "1.3.1" bytemuck = { version = "1.7.2", features = ["derive"] } borsh = "0.9.0" base64 = "0.13" @@ -70,13 +71,17 @@ serde_derive = "1.0.103" serde_json = { version = "1.0.72", optional = true } sha2 = "0.9.8" sha3 = { version = "0.9.1", optional = true } -solana-logger = { path = "../logger", version = "=1.9.0", optional = true } -solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" } -solana-program = { path = "program", version = "=1.9.0" } -solana-sdk-macro = { path = "macro", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4", optional = true } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" } +solana-program = { path = "program", version = "=1.9.4" } +solana-sdk-macro = { path = "macro", version = "=1.9.4" } thiserror = "1.0" uriparse = "0.6.3" +wasm-bindgen = "0.2" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +js-sys = "0.3.55" [dev-dependencies] curve25519-dalek = "3.2.0" diff --git a/sdk/benches/serialize_instructions.rs 
b/sdk/benches/serialize_instructions.rs index 0788bdc5ab0b2a..d780d36f82c56c 100644 --- a/sdk/benches/serialize_instructions.rs +++ b/sdk/benches/serialize_instructions.rs @@ -19,8 +19,6 @@ fn make_instructions() -> Vec { vec![inst; 4] } -const DEMOTE_PROGRAM_WRITE_LOCKS: bool = true; - #[bench] fn bench_bincode_instruction_serialize(b: &mut Bencher) { let instructions = make_instructions(); @@ -36,7 +34,7 @@ fn bench_manual_instruction_serialize(b: &mut Bencher) { SanitizedMessage::try_from(Message::new(&instructions, Some(&Pubkey::new_unique()))) .unwrap(); b.iter(|| { - test::black_box(message.serialize_instructions(DEMOTE_PROGRAM_WRITE_LOCKS)); + test::black_box(message.serialize_instructions()); }); } @@ -55,7 +53,7 @@ fn bench_manual_instruction_deserialize(b: &mut Bencher) { let message = SanitizedMessage::try_from(Message::new(&instructions, Some(&Pubkey::new_unique()))) .unwrap(); - let serialized = message.serialize_instructions(DEMOTE_PROGRAM_WRITE_LOCKS); + let serialized = message.serialize_instructions(); b.iter(|| { for i in 0..instructions.len() { #[allow(deprecated)] @@ -70,7 +68,7 @@ fn bench_manual_instruction_deserialize_single(b: &mut Bencher) { let message = SanitizedMessage::try_from(Message::new(&instructions, Some(&Pubkey::new_unique()))) .unwrap(); - let serialized = message.serialize_instructions(DEMOTE_PROGRAM_WRITE_LOCKS); + let serialized = message.serialize_instructions(); b.iter(|| { #[allow(deprecated)] test::black_box(instructions::load_instruction_at(3, &serialized).unwrap()); diff --git a/sdk/bpf/c/inc/sol/assert.h b/sdk/bpf/c/inc/sol/assert.h index e679e344c26625..2ed662a9b25256 100644 --- a/sdk/bpf/c/inc/sol/assert.h +++ b/sdk/bpf/c/inc/sol/assert.h @@ -32,6 +32,7 @@ if (!(expr)) { \ * Stub functions when building tests */ #include +#include void sol_panic_(const char *file, uint64_t len, uint64_t line, uint64_t column) { printf("Panic in %s at %d:%d\n", file, line, column); diff --git a/sdk/bpf/scripts/install.sh 
b/sdk/bpf/scripts/install.sh index 0117b235e53214..95159415d27bcd 100755 --- a/sdk/bpf/scripts/install.sh +++ b/sdk/bpf/scripts/install.sh @@ -3,11 +3,21 @@ mkdir -p "$(dirname "$0")"/../dependencies cd "$(dirname "$0")"/../dependencies -if [[ "$(uname)" = Darwin ]]; then - machine=osx -else - machine=linux -fi +unameOut="$(uname -s)" +case "${unameOut}" in + Linux*) + criterion_suffix= + machine=linux;; + Darwin*) + criterion_suffix= + machine=osx;; + MINGW*) + criterion_suffix=-mingw + machine=windows;; + *) + criterion_suffix= + machine=linux +esac download() { declare url="$1/$2/$3" @@ -80,7 +90,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then job="download \ https://github.com/Snaipe/Criterion/releases/download \ $version \ - criterion-$version-$machine-x86_64.tar.bz2 \ + criterion-$version-$machine$criterion_suffix-x86_64.tar.bz2 \ criterion" get $version criterion "$job" ) @@ -92,7 +102,7 @@ if [[ ! -e criterion-$version.md || ! -e criterion ]]; then fi # Install Rust-BPF -version=v1.20 +version=v1.21 if [[ ! -e bpf-tools-$version.md || ! 
-e bpf-tools ]]; then ( set -e diff --git a/sdk/cargo-build-bpf/Cargo.toml b/sdk/cargo-build-bpf/Cargo.toml index 8901e09ff4bda9..f8b1fe9263364c 100644 --- a/sdk/cargo-build-bpf/Cargo.toml +++ b/sdk/cargo-build-bpf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-cargo-build-bpf" -version = "1.9.0" +version = "1.9.4" description = "Compile a local package and all of its dependencies using the Solana BPF SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,8 +14,8 @@ bzip2 = "0.4.3" clap = "2.33.3" regex = "1.5.4" cargo_metadata = "0.14.1" -solana-sdk = { path = "..", version = "=1.9.0" } -solana-download-utils = { path = "../../download-utils", version = "=1.9.0" } +solana-sdk = { path = "..", version = "=1.9.4" } +solana-download-utils = { path = "../../download-utils", version = "=1.9.4" } tar = "0.4.37" [dev-dependencies] diff --git a/sdk/cargo-build-bpf/src/main.rs b/sdk/cargo-build-bpf/src/main.rs index d18ffaff6fe574..ba9358a16a8cec 100644 --- a/sdk/cargo-build-bpf/src/main.rs +++ b/sdk/cargo-build-bpf/src/main.rs @@ -462,7 +462,9 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m if legacy_program_feature_present { println!("Legacy program feature detected"); } - let bpf_tools_download_file_name = if cfg!(target_os = "macos") { + let bpf_tools_download_file_name = if cfg!(target_os = "windows") { + "solana-bpf-tools-windows.tar.bz2" + } else if cfg!(target_os = "macos") { "solana-bpf-tools-osx.tar.bz2" } else { "solana-bpf-tools-linux.tar.bz2" @@ -472,7 +474,7 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m eprintln!("Can't get home directory path: {}", err); exit(1); })); - let version = "v1.20"; + let version = "v1.21"; let package = "bpf-tools"; let target_path = home_dir .join(".cache") @@ -514,16 +516,21 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m env::set_var("AR", llvm_bin.join("llvm-ar")); 
env::set_var("OBJDUMP", llvm_bin.join("llvm-objdump")); env::set_var("OBJCOPY", llvm_bin.join("llvm-objcopy")); - let rustflags = match env::var("RUSTFLAGS") { + const RF_LTO: &str = "-C lto=no"; + let mut rustflags = match env::var("RUSTFLAGS") { Ok(rf) => { - if rf.contains("-C lto=no") { + if rf.contains(&RF_LTO) { rf } else { - rf + &" -C lto=no".to_string() + format!("{} {}", rf, RF_LTO) } } - _ => "-C lto=no".to_string(), + _ => RF_LTO.to_string(), }; + if cfg!(windows) && !rustflags.contains("-C linker=") { + let ld_path = llvm_bin.join("ld.lld"); + rustflags = format!("{} -C linker={}", rustflags, ld_path.display()); + } if config.verbose { println!("RUSTFLAGS={}", rustflags); } @@ -603,6 +610,17 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m } if file_older_or_missing(&program_unstripped_so, &program_so) { + #[cfg(windows)] + let output = spawn( + &llvm_bin.join("llvm-objcopy"), + &[ + "--strip-all".as_ref(), + program_unstripped_so.as_os_str(), + program_so.as_os_str(), + ], + config.generate_child_script_on_failure, + ); + #[cfg(not(windows))] let output = spawn( &config.bpf_sdk.join("scripts").join("strip.sh"), &[&program_unstripped_so, &program_so], @@ -614,13 +632,26 @@ fn build_bpf_package(config: &Config, target_directory: &Path, package: &cargo_m } if config.dump && file_older_or_missing(&program_unstripped_so, &program_dump) { - let output = spawn( - &config.bpf_sdk.join("scripts").join("dump.sh"), - &[&program_unstripped_so, &program_dump], - config.generate_child_script_on_failure, - ); - if config.verbose { - println!("{}", output); + let dump_script = config.bpf_sdk.join("scripts").join("dump.sh"); + #[cfg(windows)] + { + eprintln!("Using Bash scripts from within a program is not supported on Windows, skipping `--dump`."); + eprintln!( + "Please run \"{} {} {}\" from a Bash-supporting shell, then re-run this command to see the processed program dump.", + &dump_script.display(), + 
&program_unstripped_so.display(), + &program_dump.display()); + } + #[cfg(not(windows))] + { + let output = spawn( + &dump_script, + &[&program_unstripped_so, &program_dump], + config.generate_child_script_on_failure, + ); + if config.verbose { + println!("{}", output); + } } postprocess_dump(&program_dump); } @@ -679,10 +710,6 @@ fn build_bpf(config: Config, manifest_path: Option) { } fn main() { - if cfg!(windows) { - println!("Solana Rust BPF toolchain is not available on Windows"); - exit(1); - } let default_config = Config::default(); let default_bpf_sdk = format!("{}", default_config.bpf_sdk.display()); diff --git a/sdk/cargo-build-bpf/tests/crates/fail/Cargo.toml b/sdk/cargo-build-bpf/tests/crates/fail/Cargo.toml index 57babe99c0b056..a8fe4326351115 100644 --- a/sdk/cargo-build-bpf/tests/crates/fail/Cargo.toml +++ b/sdk/cargo-build-bpf/tests/crates/fail/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fail" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.9.0" } +solana-program = { path = "../../../../program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/sdk/cargo-build-bpf/tests/crates/noop/Cargo.toml b/sdk/cargo-build-bpf/tests/crates/noop/Cargo.toml index b9741b2d4e25ff..9e5e8f798fc9b7 100644 --- a/sdk/cargo-build-bpf/tests/crates/noop/Cargo.toml +++ b/sdk/cargo-build-bpf/tests/crates/noop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "noop" -version = "1.9.0" +version = "1.9.4" description = "Solana BPF test program written in Rust" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -10,7 +10,7 @@ edition = "2021" publish = false [dependencies] -solana-program = { path = "../../../../program", version = "=1.9.0" } +solana-program = { path = 
"../../../../program", version = "=1.9.4" } [lib] crate-type = ["cdylib"] diff --git a/sdk/cargo-test-bpf/Cargo.toml b/sdk/cargo-test-bpf/Cargo.toml index 9af42cff73624a..701c98d118097b 100644 --- a/sdk/cargo-test-bpf/Cargo.toml +++ b/sdk/cargo-test-bpf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-cargo-test-bpf" -version = "1.9.0" +version = "1.9.4" description = "Execute all unit and integration tests after building with the Solana BPF SDK" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/sdk/macro/Cargo.toml b/sdk/macro/Cargo.toml index e9d89f46f68313..0edead653ceda0 100644 --- a/sdk/macro/Cargo.toml +++ b/sdk/macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-sdk-macro" -version = "1.9.0" +version = "1.9.4" description = "Solana SDK Macro" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" diff --git a/sdk/macro/src/lib.rs b/sdk/macro/src/lib.rs index 8ffb6f54673cb9..7c240f4c7598b1 100644 --- a/sdk/macro/src/lib.rs +++ b/sdk/macro/src/lib.rs @@ -373,3 +373,31 @@ pub fn pubkeys(input: TokenStream) -> TokenStream { let pubkeys = parse_macro_input!(input as Pubkeys); TokenStream::from(quote! {#pubkeys}) } + +// The normal `wasm_bindgen` macro generates a .bss section which causes the resulting +// BPF program to fail to load, so for now this stub should be used when building for BPF +#[proc_macro_attribute] +pub fn wasm_bindgen_stub(_attr: TokenStream, item: TokenStream) -> TokenStream { + match parse_macro_input!(item as syn::Item) { + syn::Item::Struct(mut item_struct) => { + if let syn::Fields::Named(fields) = &mut item_struct.fields { + // Strip out any `#[wasm_bindgen]` added to struct fields. This is custom + // syntax supplied by the normal `wasm_bindgen` macro. + for field in fields.named.iter_mut() { + field.attrs.retain(|attr| { + !attr + .path + .segments + .iter() + .any(|segment| segment.ident == "wasm_bindgen") + }); + } + } + quote! 
{ #item_struct } + } + item => { + quote!(#item) + } + } + .into() +} diff --git a/sdk/package.json b/sdk/package.json new file mode 120000 index 00000000000000..aa87faef28d8ce --- /dev/null +++ b/sdk/package.json @@ -0,0 +1 @@ +program/package.json \ No newline at end of file diff --git a/sdk/program/.gitignore b/sdk/program/.gitignore new file mode 100644 index 00000000000000..936e5c57af9478 --- /dev/null +++ b/sdk/program/.gitignore @@ -0,0 +1,2 @@ +/node_modules/ +/package-lock.json diff --git a/sdk/program/Cargo.toml b/sdk/program/Cargo.toml index 94b65aad0e5e9b..ffa28f282a1abd 100644 --- a/sdk/program/Cargo.toml +++ b/sdk/program/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-program" -version = "1.9.0" +version = "1.9.4" description = "Solana Program" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -29,9 +29,9 @@ serde_bytes = "0.11" serde_derive = "1.0.103" sha2 = "0.9.2" sha3 = "0.9.1" -solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.0" } -solana-sdk-macro = { path = "../macro", version = "=1.9.0" } +solana-frozen-abi = { path = "../../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../../frozen-abi/macro", version = "=1.9.4" } +solana-sdk-macro = { path = "../macro", version = "=1.9.4" } thiserror = "1.0" [target.'cfg(not(target_arch = "bpf"))'.dependencies] @@ -40,17 +40,27 @@ base64 = "0.13" curve25519-dalek = "3.0.0" libsecp256k1 = "0.6.0" rand = "0.7.0" -solana-logger = { path = "../../logger", version = "=1.9.0" } +solana-logger = { path = "../../logger", version = "=1.9.4" } itertools = "0.10.1" +wasm-bindgen = "0.2" + +[target.'cfg(target_arch = "wasm32")'.dependencies] +console_error_panic_hook = "0.1.7" +console_log = "0.2.0" +js-sys = "0.3.55" +getrandom = { version = "0.1", features = ["wasm-bindgen"] } [target.'cfg(not(target_pointer_width = "64"))'.dependencies] parking_lot = 
"0.11" [dev-dependencies] -static_assertions = "1.1.0" +anyhow = "1.0.45" assert_matches = "1.3.0" bincode = "1.3.1" +borsh = "0.9.1" +borsh-derive = "0.9.1" serde_json = "1.0.56" +static_assertions = "1.1.0" [build-dependencies] rustc_version = "0.4" diff --git a/sdk/program/package.json b/sdk/program/package.json new file mode 100644 index 00000000000000..f1f074ff205daf --- /dev/null +++ b/sdk/program/package.json @@ -0,0 +1,14 @@ +{ + "devDependencies": { + "chai": "^4.3.4", + "mocha": "^9.1.2", + "prettier": "^2.4.1" + }, + "scripts": { + "postinstall": "npm run build", + "build": "wasm-pack build --target nodejs --dev --out-dir node_modules/crate --out-name crate", + "pretty": "prettier --check 'tests/*.mjs'", + "pretty:fix": "prettier --write 'tests/*.mjs'", + "test": "mocha 'tests/*.mjs'" + } +} diff --git a/sdk/program/src/hash.rs b/sdk/program/src/hash.rs index c150268cd36e78..66f5ecaa9801f0 100644 --- a/sdk/program/src/hash.rs +++ b/sdk/program/src/hash.rs @@ -1,7 +1,7 @@ //! The `hash` module provides functions for creating SHA-256 hashes. use { - crate::sanitize::Sanitize, + crate::{sanitize::Sanitize, wasm_bindgen}, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, sha2::{Digest, Sha256}, std::{convert::TryFrom, fmt, mem, str::FromStr}, @@ -11,6 +11,8 @@ use { pub const HASH_BYTES: usize = 32; /// Maximum string length of a base58 encoded hash const MAX_BASE58_LEN: usize = 44; + +#[wasm_bindgen] #[derive( Serialize, Deserialize, diff --git a/sdk/program/src/instruction.rs b/sdk/program/src/instruction.rs index 51324ab60953b3..5e43cf68a6e370 100644 --- a/sdk/program/src/instruction.rs +++ b/sdk/program/src/instruction.rs @@ -1,8 +1,20 @@ +//! Types for directing the execution of Solana programs. +//! +//! Every invocation of a Solana program executes a single instruction, as +//! defined by the [`Instruction`] type. An instruction is primarily a vector of +//! bytes, the contents of which are program-specific, and not interpreted by +//! 
the Solana runtime. This allows flexibility in how programs behave, how they +//! are controlled by client software, and what data encodings they use. +//! +//! Besides the instruction data, every account a program may read or write +//! while executing a given instruction is also included in `Instruction`, as +//! [`AccountMeta`] values. The runtime uses this information to efficiently +//! schedule execution of transactions. + #![allow(clippy::integer_arithmetic)] -//! Defines a composable Instruction type and a memory-efficient CompiledInstruction. use { - crate::{pubkey::Pubkey, sanitize::Sanitize, short_vec}, + crate::{pubkey::Pubkey, sanitize::Sanitize, short_vec, wasm_bindgen}, bincode::serialize, borsh::BorshSerialize, serde::Serialize, @@ -240,31 +252,132 @@ pub enum InstructionError { // conversions must also be added } +/// A directive for a single invocation of a Solana program. +/// +/// An instruction specifies which program it is calling, which accounts it may +/// read or modify, and additional data that serves as input to the program. One +/// or more instructions are included in transactions submitted by Solana +/// clients. Instructions are also used to describe [cross-program +/// invocations][cpi]. +/// +/// [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs +/// +/// During execution, a program will receive a list of account data as one of +/// its arguments, in the same order as specified during `Instruction` +/// construction. +/// +/// While Solana is agnostic to the format of the instruction data, it has +/// built-in support for serialization via [`borsh`] and [`bincode`]. 
+/// +/// [`borsh`]: https://docs.rs/borsh/latest/borsh/ +/// [`bincode`]: https://docs.rs/bincode/latest/bincode/ +/// +/// # Specifying account metadata +/// +/// When constructing an [`Instruction`], a list of all accounts that may be +/// read or written during the execution of that instruction must be supplied as +/// [`AccountMeta`] values. +/// +/// Any account whose data may be mutated by the program during execution must +/// be specified as writable. During execution, writing to an account that was +/// not specified as writable will cause the transaction to fail. Writing to an +/// account that is not owned by the program will cause the transaction to fail. +/// +/// Any account whose lamport balance may be mutated by the program during +/// execution must be specified as writable. During execution, mutating the +/// lamports of an account that was not specified as writable will cause the +/// transaction to fail. While _subtracting_ lamports from an account not owned +/// by the program will cause the transaction to fail, _adding_ lamports to any +/// account is allowed, as long is it is mutable. +/// +/// Accounts that are not read or written by the program may still be specified +/// in an `Instruction`'s account list. These will affect scheduling of program +/// execution by the runtime, but will otherwise be ignored. +/// +/// When building a transaction, the Solana runtime coalesces all accounts used +/// by all instructions in that transaction, along with accounts and permissions +/// required by the runtime, into a single account list. Some accounts and +/// account permissions required by the runtime to process a transaction are +/// _not_ required to be included in an `Instruction`s account list. These +/// include: +/// +/// - The program ID — it is a separate field of `Instruction` +/// - The transaction's fee-paying account — it is added during [`Message`] +/// construction. 
A program may still require the fee payer as part of the +/// account list if it directly references it. +/// +/// [`Message`]: crate::message::Message +/// +/// Programs may require signatures from some accounts, in which case they +/// should be specified as signers during `Instruction` construction. The +/// program must still validate during execution that the account is a signer. +#[wasm_bindgen] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct Instruction { - /// Pubkey of the instruction processor that executes this instruction + /// Pubkey of the program that executes this instruction. + #[wasm_bindgen(skip)] pub program_id: Pubkey, - /// Metadata for what accounts should be passed to the instruction processor + /// Metadata describing accounts that should be passed to the program. + #[wasm_bindgen(skip)] pub accounts: Vec, - /// Opaque data passed to the instruction processor + /// Opaque data passed to the program for its own interpretation. + #[wasm_bindgen(skip)] pub data: Vec, } impl Instruction { - #[deprecated( - since = "1.6.0", - note = "Please use another Instruction constructor instead, such as `Instruction::new_with_bincode`" - )] - pub fn new(program_id: Pubkey, data: &T, accounts: Vec) -> Self { - Self::new_with_bincode(program_id, data, accounts) - } - - pub fn new_with_bincode( + /// Create a new instruction from a value, encoded with [`borsh`]. + /// + /// [`borsh`]: https://docs.rs/borsh/latest/borsh/ + /// + /// `program_id` is the address of the program that will execute the instruction. + /// `accounts` contains a description of all accounts that may be accessed by the program. + /// + /// Borsh serialization is often prefered over bincode as it has a stable + /// [specification] and an [implementation in JavaScript][jsb], neither of + /// which are true of bincode. 
+ /// + /// [specification]: https://borsh.io/ + /// [jsb]: https://github.com/near/borsh-js + /// + /// # Examples + /// + /// ``` + /// # use solana_program::{ + /// # pubkey::Pubkey, + /// # instruction::{AccountMeta, Instruction}, + /// # }; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # + /// #[derive(BorshSerialize, BorshDeserialize)] + /// pub struct MyInstruction { + /// pub lamports: u64, + /// } + /// + /// pub fn create_instruction( + /// program_id: &Pubkey, + /// from: &Pubkey, + /// to: &Pubkey, + /// lamports: u64, + /// ) -> Instruction { + /// let instr = MyInstruction { lamports }; + /// + /// Instruction::new_with_borsh( + /// *program_id, + /// &instr, + /// vec![ + /// AccountMeta::new(*from, true), + /// AccountMeta::new(*to, false), + /// ], + /// ) + /// } + /// ``` + pub fn new_with_borsh( program_id: Pubkey, data: &T, accounts: Vec, ) -> Self { - let data = serialize(data).unwrap(); + let data = data.try_to_vec().unwrap(); Self { program_id, accounts, @@ -272,12 +385,51 @@ impl Instruction { } } - pub fn new_with_borsh( + /// Create a new instruction from a value, encoded with [`bincode`]. + /// + /// [`bincode`]: https://docs.rs/bincode/latest/bincode/ + /// + /// `program_id` is the address of the program that will execute the instruction. + /// `accounts` contains a description of all accounts that may be accessed by the program. 
+ /// + /// # Examples + /// + /// ``` + /// # use solana_program::{ + /// # pubkey::Pubkey, + /// # instruction::{AccountMeta, Instruction}, + /// # }; + /// # use serde::{Serialize, Deserialize}; + /// # + /// #[derive(Serialize, Deserialize)] + /// pub struct MyInstruction { + /// pub lamports: u64, + /// } + /// + /// pub fn create_instruction( + /// program_id: &Pubkey, + /// from: &Pubkey, + /// to: &Pubkey, + /// lamports: u64, + /// ) -> Instruction { + /// let instr = MyInstruction { lamports }; + /// + /// Instruction::new_with_bincode( + /// *program_id, + /// &instr, + /// vec![ + /// AccountMeta::new(*from, true), + /// AccountMeta::new(*to, false), + /// ], + /// ) + /// } + /// ``` + pub fn new_with_bincode( program_id: Pubkey, data: &T, accounts: Vec, ) -> Self { - let data = data.try_to_vec().unwrap(); + let data = serialize(data).unwrap(); Self { program_id, accounts, @@ -285,6 +437,50 @@ impl Instruction { } } + /// Create a new instruction from a byte slice. + /// + /// `program_id` is the address of the program that will execute the instruction. + /// `accounts` contains a description of all accounts that may be accessed by the program. + /// + /// The caller is responsible for ensuring the correct encoding of `data` as expected + /// by the callee program. 
+ /// + /// # Examples + /// + /// ``` + /// # use solana_program::{ + /// # pubkey::Pubkey, + /// # instruction::{AccountMeta, Instruction}, + /// # }; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # use anyhow::Result; + /// # + /// #[derive(BorshSerialize, BorshDeserialize)] + /// pub struct MyInstruction { + /// pub lamports: u64, + /// } + /// + /// pub fn create_instruction( + /// program_id: &Pubkey, + /// from: &Pubkey, + /// to: &Pubkey, + /// lamports: u64, + /// ) -> Result { + /// let instr = MyInstruction { lamports }; + /// + /// let mut instr_in_bytes: Vec = Vec::new(); + /// instr.serialize(&mut instr_in_bytes)?; + /// + /// Ok(Instruction::new_with_bytes( + /// *program_id, + /// &instr_in_bytes, + /// vec![ + /// AccountMeta::new(*from, true), + /// AccountMeta::new(*to, false), + /// ], + /// )) + /// } + /// ``` pub fn new_with_bytes(program_id: Pubkey, data: &[u8], accounts: Vec) -> Self { Self { program_id, @@ -292,24 +488,76 @@ impl Instruction { data: data.to_vec(), } } + + #[deprecated( + since = "1.6.0", + note = "Please use another Instruction constructor instead, such as `Instruction::new_with_borsh`" + )] + pub fn new(program_id: Pubkey, data: &T, accounts: Vec) -> Self { + Self::new_with_bincode(program_id, data, accounts) + } } +/// Addition that returns [`InstructionError::InsufficientFunds`] on overflow. +/// +/// This is an internal utility function. +#[doc(hidden)] pub fn checked_add(a: u64, b: u64) -> Result { a.checked_add(b).ok_or(InstructionError::InsufficientFunds) } -/// Account metadata used to define Instructions +/// Describes a single account read or written by a program during instruction +/// execution. +/// +/// When constructing an [`Instruction`], a list of all accounts that may be +/// read or written during the execution of that instruction must be supplied. +/// Any account that may be mutated by the program during execution, either its +/// data or metadata such as held lamports, must be writable. 
+/// +/// Note that because the Solana runtime schedules parallel transaction +/// execution around which accounts are writable, care should be taken that only +/// accounts which actually may be mutated are specified as writable. As the +/// default [`AccountMeta::new`] constructor creates writable accounts, this is +/// a minor hazard: use [`AccountMeta::new_readonly`] to specify that an account +/// is not writable. #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct AccountMeta { - /// An account's public key + /// An account's public key. pub pubkey: Pubkey, - /// True if an Instruction requires a Transaction signature matching `pubkey`. + /// True if an `Instruction` requires a `Transaction` signature matching `pubkey`. pub is_signer: bool, - /// True if the `pubkey` can be loaded as a read-write account. + /// True if the account data or metadata may be mutated during program execution. pub is_writable: bool, } impl AccountMeta { + /// Construct metadata for a writable account. + /// + /// # Examples + /// + /// ``` + /// # use solana_program::{ + /// # pubkey::Pubkey, + /// # instruction::{AccountMeta, Instruction}, + /// # }; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # + /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # pub struct MyInstruction; + /// # + /// # let instruction = MyInstruction; + /// # let from = Pubkey::new_unique(); + /// # let to = Pubkey::new_unique(); + /// # let program_id = Pubkey::new_unique(); + /// let instr = Instruction::new_with_borsh( + /// program_id, + /// &instruction, + /// vec![ + /// AccountMeta::new(from, true), + /// AccountMeta::new(to, false), + /// ], + /// ); + /// ``` pub fn new(pubkey: Pubkey, is_signer: bool) -> Self { Self { pubkey, @@ -318,6 +566,35 @@ impl AccountMeta { } } + /// Construct metadata for a read-only account. 
+ /// + /// # Examples + /// + /// ``` + /// # use solana_program::{ + /// # pubkey::Pubkey, + /// # instruction::{AccountMeta, Instruction}, + /// # }; + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # + /// # #[derive(BorshSerialize, BorshDeserialize)] + /// # pub struct MyInstruction; + /// # + /// # let instruction = MyInstruction; + /// # let from = Pubkey::new_unique(); + /// # let to = Pubkey::new_unique(); + /// # let from_account_storage = Pubkey::new_unique(); + /// # let program_id = Pubkey::new_unique(); + /// let instr = Instruction::new_with_borsh( + /// program_id, + /// &instruction, + /// vec![ + /// AccountMeta::new(from, true), + /// AccountMeta::new(to, false), + /// AccountMeta::new_readonly(from_account_storage, false), + /// ], + /// ); + /// ``` pub fn new_readonly(pubkey: Pubkey, is_signer: bool) -> Self { Self { pubkey, @@ -327,16 +604,22 @@ impl AccountMeta { } } -/// An instruction to execute a program +/// A compact encoding of an instruction. +/// +/// A `CompiledInstruction` is a component of a multi-instruction [`Message`], +/// which is the core of a Solana transaction. It is created during the +/// construction of `Message`. Most users will not interact with it directly. +/// +/// [`Message`]: crate::message::Message #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample)] #[serde(rename_all = "camelCase")] pub struct CompiledInstruction { - /// Index into the transaction keys array indicating the program account that executes this instruction + /// Index into the transaction keys array indicating the program account that executes this instruction. pub program_id_index: u8, - /// Ordered indices into the transaction keys array indicating which accounts to pass to the program + /// Ordered indices into the transaction keys array indicating which accounts to pass to the program. #[serde(with = "short_vec")] pub accounts: Vec, - /// The program input data + /// The program input data. 
#[serde(with = "short_vec")] pub data: Vec, } diff --git a/sdk/program/src/lib.rs b/sdk/program/src/lib.rs index 3d5768a6d909e4..8ac5140ad8374f 100644 --- a/sdk/program/src/lib.rs +++ b/sdk/program/src/lib.rs @@ -52,6 +52,12 @@ pub mod stake_history; pub mod system_instruction; pub mod system_program; pub mod sysvar; +pub mod wasm; + +#[cfg(target_arch = "bpf")] +pub use solana_sdk_macro::wasm_bindgen_stub as wasm_bindgen; +#[cfg(not(target_arch = "bpf"))] +pub use wasm_bindgen::prelude::wasm_bindgen; pub mod config { pub mod program { diff --git a/sdk/program/src/log.rs b/sdk/program/src/log.rs index a205241929c50c..4655c1b1fdc054 100644 --- a/sdk/program/src/log.rs +++ b/sdk/program/src/log.rs @@ -21,12 +21,10 @@ macro_rules! info { /// Print a message to the log /// -/// There are two fast forms: +/// Fast form: /// 1. Single string: `msg!("hi")` -/// 2. 5 integers: `msg!(1, 2, 3, 4, 5)` /// -/// The third form is more generic and incurs a very large runtime overhead so it should be used -/// with care: +/// The generic form incurs a very large runtime overhead so it should be used with care: /// 3. Generalized format string: `msg!("Hello {}: 1, 2, {}", "World", 3)` /// #[macro_export] @@ -34,15 +32,6 @@ macro_rules! 
msg { ($msg:expr) => { $crate::log::sol_log($msg) }; - ($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => { - $crate::log::sol_log_64( - $arg1 as u64, - $arg2 as u64, - $arg3 as u64, - $arg4 as u64, - $arg5 as u64, - ) - }; ($($arg:tt)*) => ($crate::log::sol_log(&format!($($arg)*))); } @@ -108,7 +97,7 @@ pub fn sol_log_data(data: &[&[u8]]) { #[allow(dead_code)] pub fn sol_log_slice(slice: &[u8]) { for (i, s) in slice.iter().enumerate() { - msg!(0, 0, 0, i, *s); + sol_log_64(0, 0, 0, i as u64, *s as u64); } } @@ -120,15 +109,15 @@ pub fn sol_log_slice(slice: &[u8]) { pub fn sol_log_params(accounts: &[AccountInfo], data: &[u8]) { for (i, account) in accounts.iter().enumerate() { msg!("AccountInfo"); - msg!(0, 0, 0, 0, i); + sol_log_64(0, 0, 0, 0, i as u64); msg!("- Is signer"); - msg!(0, 0, 0, 0, account.is_signer); + sol_log_64(0, 0, 0, 0, account.is_signer as u64); msg!("- Key"); account.key.log(); msg!("- Lamports"); - msg!(0, 0, 0, 0, account.lamports()); + sol_log_64(0, 0, 0, 0, account.lamports()); msg!("- Account data length"); - msg!(0, 0, 0, 0, account.data_len()); + sol_log_64(0, 0, 0, 0, account.data_len() as u64); msg!("- Owner"); account.owner.log(); } diff --git a/sdk/program/src/message/legacy.rs b/sdk/program/src/message/legacy.rs index 1961018c59aaa0..cd9d65e716ff3d 100644 --- a/sdk/program/src/message/legacy.rs +++ b/sdk/program/src/message/legacy.rs @@ -12,11 +12,10 @@ use { serialize_utils::{ append_slice, append_u16, append_u8, read_pubkey, read_slice, read_u16, read_u8, }, - short_vec, system_instruction, system_program, sysvar, + short_vec, system_instruction, system_program, sysvar, wasm_bindgen, }, - itertools::Itertools, lazy_static::lazy_static, - std::{convert::TryFrom, str::FromStr}, + std::{collections::BTreeSet, convert::TryFrom, str::FromStr}, }; lazy_static! { @@ -159,24 +158,28 @@ fn get_keys(instructions: &[Instruction], payer: Option<&Pubkey>) -> Instruction /// Return program ids referenced by all instructions. 
No duplicates and order is preserved. fn get_program_ids(instructions: &[Instruction]) -> Vec { + let mut set = BTreeSet::new(); instructions .iter() .map(|ix| ix.program_id) - .unique() + .filter(|&program_id| set.insert(program_id)) .collect() } // NOTE: Serialization-related changes must be paired with the custom serialization // for versioned messages in the `RemainingLegacyMessage` struct. +#[wasm_bindgen] #[frozen_abi(digest = "2KnLEqfLcTBQqitE22Pp8JYkaqVVbAkGbCfdeHoyxcAU")] #[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] #[serde(rename_all = "camelCase")] pub struct Message { /// The message header, identifying signed and read-only `account_keys` /// NOTE: Serialization-related changes must be paired with the direct read at sigverify. + #[wasm_bindgen(skip)] pub header: MessageHeader, /// All the account keys used by this transaction + #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub account_keys: Vec, @@ -185,6 +188,7 @@ pub struct Message { /// Programs that will be executed in sequence and committed in one atomic transaction if all /// succeed. 
+ #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub instructions: Vec, } @@ -363,10 +367,9 @@ impl Message { self.program_position(i).is_some() } - pub fn is_writable(&self, i: usize, demote_program_write_locks: bool) -> bool { - let demote_program_id = demote_program_write_locks - && self.is_key_called_as_program(i) - && !self.is_upgradeable_loader_present(); + pub fn is_writable(&self, i: usize) -> bool { + let demote_program_id = + self.is_key_called_as_program(i) && !self.is_upgradeable_loader_present(); (i < (self.header.num_required_signatures - self.header.num_readonly_signed_accounts) as usize || (i >= self.header.num_required_signatures as usize @@ -388,7 +391,7 @@ impl Message { let mut writable_keys = vec![]; let mut readonly_keys = vec![]; for (i, key) in self.account_keys.iter().enumerate() { - if self.is_writable(i, /*demote_program_write_locks=*/ true) { + if self.is_writable(i) { writable_keys.push(key); } else { readonly_keys.push(key); @@ -426,8 +429,7 @@ impl Message { for account_index in &instruction.accounts { let account_index = *account_index as usize; let is_signer = self.is_signer(account_index); - let is_writable = - self.is_writable(account_index, /*demote_program_write_locks=*/ true); + let is_writable = self.is_writable(account_index); let mut meta_byte = 0; if is_signer { meta_byte |= 1 << Self::IS_SIGNER_BIT; @@ -890,13 +892,12 @@ mod tests { recent_blockhash: Hash::default(), instructions: vec![], }; - let demote_program_write_locks = true; - assert!(message.is_writable(0, demote_program_write_locks)); - assert!(!message.is_writable(1, demote_program_write_locks)); - assert!(!message.is_writable(2, demote_program_write_locks)); - assert!(message.is_writable(3, demote_program_write_locks)); - assert!(message.is_writable(4, demote_program_write_locks)); - assert!(!message.is_writable(5, demote_program_write_locks)); + assert!(message.is_writable(0)); + assert!(!message.is_writable(1)); + assert!(!message.is_writable(2)); + 
assert!(message.is_writable(3)); + assert!(message.is_writable(4)); + assert!(!message.is_writable(5)); } #[test] diff --git a/sdk/program/src/message/mod.rs b/sdk/program/src/message/mod.rs index 8c5737d38378d9..910a03c39cadbe 100644 --- a/sdk/program/src/message/mod.rs +++ b/sdk/program/src/message/mod.rs @@ -5,12 +5,10 @@ pub mod legacy; #[cfg(not(target_arch = "bpf"))] #[path = ""] mod non_bpf_modules { - mod mapped; mod sanitized; - pub mod v0; mod versions; - pub use {mapped::*, sanitized::*, versions::*}; + pub use {sanitized::*, versions::*}; } pub use legacy::Message; diff --git a/sdk/program/src/message/sanitized.rs b/sdk/program/src/message/sanitized.rs index 7a600e002243ea..e934d22a2f2fa5 100644 --- a/sdk/program/src/message/sanitized.rs +++ b/sdk/program/src/message/sanitized.rs @@ -2,7 +2,7 @@ use { crate::{ hash::Hash, instruction::{CompiledInstruction, Instruction}, - message::{MappedAddresses, MappedMessage, Message, MessageHeader}, + message::{v0::{self, LoadedAddresses}, legacy::Message as LegacyMessage, MessageHeader}, pubkey::Pubkey, sanitize::{Sanitize, SanitizeError}, serialize_utils::{append_slice, append_u16, append_u8}, @@ -17,9 +17,9 @@ use { #[derive(Debug, Clone)] pub enum SanitizedMessage { /// Sanitized legacy message - Legacy(Message), + Legacy(LegacyMessage), /// Sanitized version #0 message with mapped addresses - V0(MappedMessage), + V0(v0::LoadedMessage), } #[derive(PartialEq, Debug, Error, Eq, Clone)] @@ -30,8 +30,6 @@ pub enum SanitizeMessageError { ValueOutOfBounds, #[error("invalid value")] InvalidValue, - #[error("duplicate account key")] - DuplicateAccountKey, } impl From for SanitizeMessageError { @@ -44,17 +42,11 @@ impl From for SanitizeMessageError { } } -impl TryFrom for SanitizedMessage { +impl TryFrom for SanitizedMessage { type Error = SanitizeMessageError; - fn try_from(message: Message) -> Result { + fn try_from(message: LegacyMessage) -> Result { message.sanitize()?; - - let sanitized_msg = Self::Legacy(message); 
- if sanitized_msg.has_duplicates() { - return Err(SanitizeMessageError::DuplicateAccountKey); - } - - Ok(sanitized_msg) + Ok(Self::Legacy(message)) } } @@ -80,12 +72,12 @@ impl SanitizedMessage { pub fn header(&self) -> &MessageHeader { match self { Self::Legacy(message) => &message.header, - Self::V0(mapped_msg) => &mapped_msg.message.header, + Self::V0(message) => &message.header, } } /// Returns a legacy message if this sanitized message wraps one - pub fn legacy_message(&self) -> Option<&Message> { + pub fn legacy_message(&self) -> Option<&LegacyMessage> { if let Self::Legacy(message) = &self { Some(message) } else { @@ -103,7 +95,7 @@ impl SanitizedMessage { pub fn recent_blockhash(&self) -> &Hash { match self { Self::Legacy(message) => &message.recent_blockhash, - Self::V0(mapped_msg) => &mapped_msg.message.recent_blockhash, + Self::V0(message) => &message.recent_blockhash, } } @@ -112,7 +104,7 @@ impl SanitizedMessage { pub fn instructions(&self) -> &[CompiledInstruction] { match self { Self::Legacy(message) => &message.instructions, - Self::V0(mapped_msg) => &mapped_msg.message.instructions, + Self::V0(message) => &message.instructions, } } @@ -123,7 +115,7 @@ impl SanitizedMessage { ) -> impl Iterator { match self { Self::Legacy(message) => message.instructions.iter(), - Self::V0(mapped_msg) => mapped_msg.message.instructions.iter(), + Self::V0(message) => message.instructions.iter(), } .map(move |ix| { ( @@ -138,7 +130,7 @@ impl SanitizedMessage { pub fn account_keys_iter(&self) -> Box + '_> { match self { Self::Legacy(message) => Box::new(message.account_keys.iter()), - Self::V0(mapped_msg) => Box::new(mapped_msg.account_keys_iter()), + Self::V0(message) => Box::new(message.account_keys_iter()), } } @@ -146,7 +138,7 @@ impl SanitizedMessage { pub fn account_keys_len(&self) -> usize { match self { Self::Legacy(message) => message.account_keys.len(), - Self::V0(mapped_msg) => mapped_msg.account_keys_len(), + Self::V0(message) => 
message.account_keys_len(), } } @@ -187,10 +179,10 @@ impl SanitizedMessage { /// Returns true if the account at the specified index is writable by the /// instructions in this message. - pub fn is_writable(&self, index: usize, demote_program_write_locks: bool) -> bool { + pub fn is_writable(&self, index: usize) -> bool { match self { - Self::Legacy(message) => message.is_writable(index, demote_program_write_locks), - Self::V0(message) => message.is_writable(index, demote_program_write_locks), + Self::Legacy(message) => message.is_writable(index), + Self::V0(message) => message.is_writable(index), } } @@ -214,7 +206,7 @@ impl SanitizedMessage { // 67..69 - data len - u16 // 69..data_len - data #[allow(clippy::integer_arithmetic)] - pub fn serialize_instructions(&self, demote_program_write_locks: bool) -> Vec { + pub fn serialize_instructions(&self) -> Vec { // 64 bytes is a reasonable guess, calculating exactly is slower in benchmarks let mut data = Vec::with_capacity(self.instructions().len() * (32 * 2)); append_u16(&mut data, self.instructions().len() as u16); @@ -229,7 +221,7 @@ impl SanitizedMessage { for account_index in &instruction.accounts { let account_index = *account_index as usize; let is_signer = self.is_signer(account_index); - let is_writable = self.is_writable(account_index, demote_program_write_locks); + let is_writable = self.is_writable(account_index); let mut account_meta = InstructionsSysvarAccountMeta::NONE; if is_signer { account_meta |= InstructionsSysvarAccountMeta::IS_SIGNER; @@ -251,10 +243,10 @@ impl SanitizedMessage { data } - /// Return the mapped addresses for this message if it has any. - fn mapped_addresses(&self) -> Option<&MappedAddresses> { + /// Return the resolved addresses for this message if it has any. 
+ fn loaded_lookup_table_addresses(&self) -> Option<&LoadedAddresses> { match &self { - SanitizedMessage::V0(message) => Some(&message.mapped_addresses), + SanitizedMessage::V0(message) => Some(&message.loaded_addresses), _ => None, } } @@ -262,7 +254,7 @@ impl SanitizedMessage { /// Return the number of readonly accounts loaded by this message. pub fn num_readonly_accounts(&self) -> usize { let mapped_readonly_addresses = self - .mapped_addresses() + .loaded_lookup_table_addresses() .map(|keys| keys.readonly.len()) .unwrap_or_default(); mapped_readonly_addresses @@ -310,24 +302,9 @@ mod tests { #[test] fn test_try_from_message() { - let dupe_key = Pubkey::new_unique(); - let legacy_message_with_dupes = Message { - header: MessageHeader { - num_required_signatures: 1, - ..MessageHeader::default() - }, - account_keys: vec![dupe_key, dupe_key], - ..Message::default() - }; - - assert_eq!( - SanitizedMessage::try_from(legacy_message_with_dupes).err(), - Some(SanitizeMessageError::DuplicateAccountKey), - ); - - let legacy_message_with_no_signers = Message { + let legacy_message_with_no_signers = LegacyMessage { account_keys: vec![Pubkey::new_unique()], - ..Message::default() + ..LegacyMessage::default() }; assert_eq!( @@ -346,7 +323,7 @@ mod tests { CompiledInstruction::new(2, &(), vec![0, 1]), ]; - let message = SanitizedMessage::try_from(Message::new_with_compiled_instructions( + let message = SanitizedMessage::try_from(LegacyMessage::new_with_compiled_instructions( 1, 0, 2, @@ -370,20 +347,20 @@ mod tests { let key4 = Pubkey::new_unique(); let key5 = Pubkey::new_unique(); - let legacy_message = SanitizedMessage::try_from(Message { + let legacy_message = SanitizedMessage::try_from(LegacyMessage { header: MessageHeader { num_required_signatures: 2, num_readonly_signed_accounts: 1, num_readonly_unsigned_accounts: 1, }, account_keys: vec![key0, key1, key2, key3], - ..Message::default() + ..LegacyMessage::default() }) .unwrap(); 
assert_eq!(legacy_message.num_readonly_accounts(), 2); - let mapped_message = SanitizedMessage::V0(MappedMessage { + let v0_message = SanitizedMessage::V0(v0::LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 2, @@ -393,13 +370,13 @@ mod tests { account_keys: vec![key0, key1, key2, key3], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key4], readonly: vec![key5], }, }); - assert_eq!(mapped_message.num_readonly_accounts(), 3); + assert_eq!(v0_message.num_readonly_accounts(), 3); } #[test] @@ -426,10 +403,9 @@ mod tests { ), ]; - let demote_program_write_locks = true; - let message = Message::new(&instructions, Some(&id1)); + let message = LegacyMessage::new(&instructions, Some(&id1)); let sanitized_message = SanitizedMessage::try_from(message.clone()).unwrap(); - let serialized = sanitized_message.serialize_instructions(demote_program_write_locks); + let serialized = sanitized_message.serialize_instructions(); // assert that SanitizedMessage::serialize_instructions has the same behavior as the // deprecated Message::serialize_instructions method @@ -438,7 +414,7 @@ mod tests { // assert that Message::deserialize_instruction is compatible with SanitizedMessage::serialize_instructions for (i, instruction) in instructions.iter().enumerate() { assert_eq!( - Message::deserialize_instruction(i, &serialized).unwrap(), + LegacyMessage::deserialize_instruction(i, &serialized).unwrap(), *instruction ); } @@ -481,18 +457,18 @@ mod tests { data: vec![], }; - let legacy_message = SanitizedMessage::try_from(Message { + let legacy_message = SanitizedMessage::try_from(LegacyMessage { header: MessageHeader { num_required_signatures: 1, num_readonly_signed_accounts: 0, num_readonly_unsigned_accounts: 0, }, account_keys: vec![key0, key1, key2, program_id], - ..Message::default() + ..LegacyMessage::default() }) .unwrap(); - let mapped_message = 
SanitizedMessage::V0(MappedMessage { + let v0_message = SanitizedMessage::V0(v0::LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 1, @@ -502,13 +478,13 @@ mod tests { account_keys: vec![key0, key1], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key2], readonly: vec![program_id], }, }); - for message in vec![legacy_message, mapped_message] { + for message in vec![legacy_message, v0_message] { assert_eq!( message.try_compile_instruction(&valid_instruction), Some(CompiledInstruction { diff --git a/sdk/program/src/message/v0.rs b/sdk/program/src/message/v0.rs deleted file mode 100644 index 9319d32157181d..00000000000000 --- a/sdk/program/src/message/v0.rs +++ /dev/null @@ -1,396 +0,0 @@ -use crate::{ - hash::Hash, - instruction::CompiledInstruction, - message::{MessageHeader, MESSAGE_VERSION_PREFIX}, - pubkey::Pubkey, - sanitize::{Sanitize, SanitizeError}, - short_vec, -}; - -/// Indexes that are mapped to addresses using an on-chain address map for -/// succinctly loading readonly and writable accounts. -#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] -#[serde(rename_all = "camelCase")] -pub struct AddressMapIndexes { - #[serde(with = "short_vec")] - pub writable: Vec, - #[serde(with = "short_vec")] - pub readonly: Vec, -} - -/// Transaction message format which supports succinct account loading with -/// indexes for on-chain address maps. -#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] -#[serde(rename_all = "camelCase")] -pub struct Message { - /// The message header, identifying signed and read-only `account_keys` - pub header: MessageHeader, - - /// List of accounts loaded by this transaction. - #[serde(with = "short_vec")] - pub account_keys: Vec, - - /// The blockhash of a recent block. 
- pub recent_blockhash: Hash, - - /// Instructions that invoke a designated program, are executed in sequence, - /// and committed in one atomic transaction if all succeed. - /// - /// # Notes - /// - /// Account and program indexes will index into the list of addresses - /// constructed from the concatenation of `account_keys`, flattened list of - /// `writable` address map indexes, and the flattened `readonly` address - /// map indexes. - #[serde(with = "short_vec")] - pub instructions: Vec, - - /// List of address map indexes used to succinctly load additional accounts - /// for this transaction. - /// - /// # Notes - /// - /// The last `address_map_indexes.len()` accounts of the read-only unsigned - /// accounts are loaded as address maps. - #[serde(with = "short_vec")] - pub address_map_indexes: Vec, -} - -impl Sanitize for Message { - fn sanitize(&self) -> Result<(), SanitizeError> { - // signing area and read-only non-signing area should not - // overlap - if usize::from(self.header.num_required_signatures) - .saturating_add(usize::from(self.header.num_readonly_unsigned_accounts)) - > self.account_keys.len() - { - return Err(SanitizeError::IndexOutOfBounds); - } - - // there should be at least 1 RW fee-payer account. - if self.header.num_readonly_signed_accounts >= self.header.num_required_signatures { - return Err(SanitizeError::IndexOutOfBounds); - } - - // there cannot be more address maps than read-only unsigned accounts. 
- let num_address_map_indexes = self.address_map_indexes.len(); - if num_address_map_indexes > usize::from(self.header.num_readonly_unsigned_accounts) { - return Err(SanitizeError::IndexOutOfBounds); - } - - // each map must load at least one entry - let mut num_loaded_accounts = self.account_keys.len(); - for indexes in &self.address_map_indexes { - let num_loaded_map_entries = indexes - .writable - .len() - .saturating_add(indexes.readonly.len()); - - if num_loaded_map_entries == 0 { - return Err(SanitizeError::InvalidValue); - } - - num_loaded_accounts = num_loaded_accounts.saturating_add(num_loaded_map_entries); - } - - // the number of loaded accounts must be <= 256 since account indices are - // encoded as `u8` - if num_loaded_accounts > 256 { - return Err(SanitizeError::IndexOutOfBounds); - } - - for ci in &self.instructions { - if usize::from(ci.program_id_index) >= num_loaded_accounts { - return Err(SanitizeError::IndexOutOfBounds); - } - // A program cannot be a payer. - if ci.program_id_index == 0 { - return Err(SanitizeError::IndexOutOfBounds); - } - for ai in &ci.accounts { - if usize::from(*ai) >= num_loaded_accounts { - return Err(SanitizeError::IndexOutOfBounds); - } - } - } - - Ok(()) - } -} - -impl Message { - /// Serialize this message with a version #0 prefix using bincode encoding. 
- pub fn serialize(&self) -> Vec { - bincode::serialize(&(MESSAGE_VERSION_PREFIX, self)).unwrap() - } -} - -#[cfg(test)] -mod tests { - use {super::*, crate::message::VersionedMessage}; - - fn simple_message() -> Message { - Message { - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 1, - }, - account_keys: vec![Pubkey::new_unique(), Pubkey::new_unique()], - address_map_indexes: vec![AddressMapIndexes { - writable: vec![], - readonly: vec![0], - }], - ..Message::default() - } - } - - fn two_map_message() -> Message { - Message { - header: MessageHeader { - num_required_signatures: 1, - num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 2, - }, - account_keys: vec![ - Pubkey::new_unique(), - Pubkey::new_unique(), - Pubkey::new_unique(), - ], - address_map_indexes: vec![ - AddressMapIndexes { - writable: vec![1], - readonly: vec![0], - }, - AddressMapIndexes { - writable: vec![0], - readonly: vec![1], - }, - ], - ..Message::default() - } - } - - #[test] - fn test_sanitize_account_indices() { - assert!(Message { - account_keys: (0..=u8::MAX).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..u8::MAX).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - account_keys: (0..u8::MAX).map(|_| Pubkey::new_unique()).collect(), - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..u8::MAX - 1).map(|_| 
Pubkey::new_unique()).collect(), - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..53).step_by(2).collect(), - readonly: (1..53).step_by(2).collect(), - }, - ], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..two_map_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..52).step_by(2).collect(), - readonly: (1..52).step_by(2).collect(), - }, - ], - instructions: vec![CompiledInstruction { - program_id_index: 1, - accounts: vec![u8::MAX], - data: vec![], - }], - ..two_map_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_sanitize_excessive_loaded_accounts() { - assert!(Message { - account_keys: (0..=u8::MAX).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..257).map(|_| Pubkey::new_unique()).collect(), - address_map_indexes: vec![], - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - account_keys: (0..u8::MAX).map(|_| Pubkey::new_unique()).collect(), - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - account_keys: (0..256).map(|_| Pubkey::new_unique()).collect(), - ..simple_message() - } - .sanitize() - .is_err()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: 
(0..53).step_by(2).collect(), - readonly: (1..53).step_by(2).collect(), - } - ], - ..two_map_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![ - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - }, - AddressMapIndexes { - writable: (0..200).step_by(2).collect(), - readonly: (1..200).step_by(2).collect(), - } - ], - ..two_map_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_sanitize_excessive_maps() { - assert!(Message { - header: MessageHeader { - num_readonly_unsigned_accounts: 1, - ..simple_message().header - }, - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - header: MessageHeader { - num_readonly_unsigned_accounts: 0, - ..simple_message().header - }, - ..simple_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_sanitize_address_map() { - assert!(Message { - address_map_indexes: vec![AddressMapIndexes { - writable: vec![0], - readonly: vec![], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![AddressMapIndexes { - writable: vec![], - readonly: vec![0], - }], - ..simple_message() - } - .sanitize() - .is_ok()); - - assert!(Message { - address_map_indexes: vec![AddressMapIndexes { - writable: vec![], - readonly: vec![], - }], - ..simple_message() - } - .sanitize() - .is_err()); - } - - #[test] - fn test_serialize() { - let message = simple_message(); - let versioned_msg = VersionedMessage::V0(message.clone()); - assert_eq!(message.serialize(), versioned_msg.serialize()); - } -} diff --git a/sdk/program/src/message/versions.rs b/sdk/program/src/message/versions/mod.rs similarity index 91% rename from sdk/program/src/message/versions.rs rename to sdk/program/src/message/versions/mod.rs index 1ec621d74c0d50..9242731af611bb 100644 --- a/sdk/program/src/message/versions.rs +++ b/sdk/program/src/message/versions/mod.rs @@ -2,7 +2,7 @@ use { crate::{ 
hash::Hash, instruction::CompiledInstruction, - message::{v0, Message, MessageHeader}, + message::{legacy::Message as LegacyMessage, MessageHeader}, pubkey::Pubkey, sanitize::{Sanitize, SanitizeError}, short_vec, @@ -15,6 +15,8 @@ use { std::fmt, }; +pub mod v0; + /// Bit mask that indicates whether a serialized message is versioned. pub const MESSAGE_VERSION_PREFIX: u8 = 0x80; @@ -26,10 +28,10 @@ pub const MESSAGE_VERSION_PREFIX: u8 = 0x80; /// which message version is serialized starting from version `0`. If the first /// is bit is not set, all bytes are used to encode the legacy `Message` /// format. -#[frozen_abi(digest = "x2F3RG2RhJQWN6L2N3jebvcAvNYFrhE3sKTPJ4sENvL")] +#[frozen_abi(digest = "G4EAiqmGgBprgf5ePYemLJcoFfx4R7rhC1Weo2FVJ7fn")] #[derive(Debug, PartialEq, Eq, Clone, AbiEnumVisitor, AbiExample)] pub enum VersionedMessage { - Legacy(Message), + Legacy(LegacyMessage), V0(v0::Message), } @@ -98,7 +100,7 @@ impl VersionedMessage { impl Default for VersionedMessage { fn default() -> Self { - Self::Legacy(Message::default()) + Self::Legacy(LegacyMessage::default()) } } @@ -206,7 +208,7 @@ impl<'de> Deserialize<'de> for VersionedMessage { de::Error::invalid_length(1, &self) })?; - Ok(VersionedMessage::Legacy(Message { + Ok(VersionedMessage::Legacy(LegacyMessage { header: MessageHeader { num_required_signatures, num_readonly_signed_accounts: message.num_readonly_signed_accounts, @@ -247,7 +249,7 @@ mod tests { super::*, crate::{ instruction::{AccountMeta, Instruction}, - message::v0::AddressMapIndexes, + message::v0::MessageAddressTableLookup, }, }; @@ -274,7 +276,7 @@ mod tests { ), ]; - let mut message = Message::new(&instructions, Some(&id1)); + let mut message = LegacyMessage::new(&instructions, Some(&id1)); message.recent_blockhash = Hash::new_unique(); let bytes1 = bincode::serialize(&message).unwrap(); @@ -282,7 +284,7 @@ mod tests { assert_eq!(bytes1, bytes2); - let message1: Message = bincode::deserialize(&bytes1).unwrap(); + let message1: 
LegacyMessage = bincode::deserialize(&bytes1).unwrap(); let message2: VersionedMessage = bincode::deserialize(&bytes2).unwrap(); if let VersionedMessage::Legacy(message2) = message2 { @@ -299,27 +301,27 @@ mod tests { header: MessageHeader { num_required_signatures: 1, num_readonly_signed_accounts: 0, - num_readonly_unsigned_accounts: 2, + num_readonly_unsigned_accounts: 0, }, recent_blockhash: Hash::new_unique(), account_keys: vec![ Pubkey::new_unique(), - Pubkey::new_unique(), - Pubkey::new_unique(), ], - address_map_indexes: vec![ - AddressMapIndexes { - writable: vec![1], - readonly: vec![0], + address_table_lookups: vec![ + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1], + readonly_indexes: vec![0], }, - AddressMapIndexes { - writable: vec![0], - readonly: vec![1], + MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![1], }, ], instructions: vec![CompiledInstruction { program_id_index: 1, - accounts: vec![0], + accounts: vec![0, 2, 3, 4], data: vec![], }], }; diff --git a/sdk/program/src/message/mapped.rs b/sdk/program/src/message/versions/v0/loaded.rs similarity index 80% rename from sdk/program/src/message/mapped.rs rename to sdk/program/src/message/versions/v0/loaded.rs index 4c60e4fd5d3c6a..7bb62b8b0b3fe2 100644 --- a/sdk/program/src/message/mapped.rs +++ b/sdk/program/src/message/versions/v0/loaded.rs @@ -5,37 +5,44 @@ use { pubkey::Pubkey, sysvar, }, - std::{collections::HashSet, convert::TryFrom}, + std::{collections::HashSet, ops::Deref, convert::TryFrom}, }; -/// Combination of a version #0 message and its mapped addresses +/// Combination of a version #0 message and its loaded addresses #[derive(Debug, Clone)] -pub struct MappedMessage { - /// Message which loaded a collection of mapped addresses +pub struct LoadedMessage { + /// Message which loaded a collection of lookup table addresses pub message: v0::Message, - /// Collection of 
mapped addresses loaded by this message - pub mapped_addresses: MappedAddresses, + /// Addresses loaded with on-chain address lookup tables + pub loaded_addresses: LoadedAddresses, } -/// Collection of mapped addresses loaded succinctly by a transaction using -/// on-chain address map accounts. +impl Deref for LoadedMessage { + type Target = v0::Message; + fn deref(&self) -> &Self::Target { + &self.message + } +} + +/// Collection of addresses loaded from on-chain lookup tables, split +/// by readonly and writable. #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize)] -pub struct MappedAddresses { +pub struct LoadedAddresses { /// List of addresses for writable loaded accounts pub writable: Vec, /// List of addresses for read-only loaded accounts pub readonly: Vec, } -impl MappedMessage { +impl LoadedMessage { /// Returns an iterator of account key segments. The ordering of segments /// affects how account indexes from compiled instructions are resolved and /// so should not be changed. 
fn account_keys_segment_iter(&self) -> impl Iterator> { vec![ &self.message.account_keys, - &self.mapped_addresses.writable, - &self.mapped_addresses.readonly, + &self.loaded_addresses.writable, + &self.loaded_addresses.readonly, ] .into_iter() } @@ -82,7 +89,7 @@ impl MappedMessage { let num_signed_accounts = usize::from(header.num_required_signatures); if key_index >= num_account_keys { let mapped_addresses_index = key_index.saturating_sub(num_account_keys); - mapped_addresses_index < self.mapped_addresses.writable.len() + mapped_addresses_index < self.loaded_addresses.writable.len() } else if key_index >= num_signed_accounts { let num_unsigned_accounts = num_account_keys.saturating_sub(num_signed_accounts); let num_writable_unsigned_accounts = num_unsigned_accounts @@ -97,11 +104,10 @@ impl MappedMessage { } /// Returns true if the account at the specified index was loaded as writable - pub fn is_writable(&self, key_index: usize, demote_program_write_locks: bool) -> bool { + pub fn is_writable(&self, key_index: usize) -> bool { if self.is_writable_index(key_index) { if let Some(key) = self.get_account_key(key_index) { - let demote_program_id = demote_program_write_locks - && self.is_key_called_as_program(key_index) + let demote_program_id = self.is_key_called_as_program(key_index) && !self.is_upgradeable_loader_present(); return !(sysvar::is_sysvar_id(key) || BUILTIN_PROGRAMS_KEYS.contains(key) @@ -138,7 +144,7 @@ mod tests { itertools::Itertools, }; - fn create_test_mapped_message() -> (MappedMessage, [Pubkey; 6]) { + fn check_test_loaded_message() -> (LoadedMessage, [Pubkey; 6]) { let key0 = Pubkey::new_unique(); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); @@ -146,7 +152,7 @@ mod tests { let key4 = Pubkey::new_unique(); let key5 = Pubkey::new_unique(); - let message = MappedMessage { + let message = LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 2, @@ -156,7 +162,7 @@ mod tests { account_keys: 
vec![key0, key1, key2, key3], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key4], readonly: vec![key5], }, @@ -167,7 +173,7 @@ mod tests { #[test] fn test_account_keys_segment_iter() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); let expected_segments = vec![ vec![keys[0], keys[1], keys[2], keys[3]], @@ -183,14 +189,14 @@ mod tests { #[test] fn test_account_keys_len() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); assert_eq!(message.account_keys_len(), keys.len()); } #[test] fn test_account_keys_iter() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); let mut iter = message.account_keys_iter(); for expected_key in keys { @@ -200,19 +206,19 @@ mod tests { #[test] fn test_has_duplicates() { - let message = create_test_mapped_message().0; + let message = check_test_loaded_message().0; assert!(!message.has_duplicates()); } #[test] fn test_has_duplicates_with_dupe_keys() { - let create_message_with_dupe_keys = |mut keys: Vec| MappedMessage { + let create_message_with_dupe_keys = |mut keys: Vec| LoadedMessage { message: v0::Message { account_keys: keys.split_off(2), ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: keys.split_off(2), readonly: keys, }, @@ -234,7 +240,7 @@ mod tests { #[test] fn test_get_account_key() { - let (message, keys) = create_test_mapped_message(); + let (message, keys) = check_test_loaded_message(); assert_eq!(message.get_account_key(0), Some(&keys[0])); assert_eq!(message.get_account_key(1), Some(&keys[1])); @@ -246,7 +252,7 @@ mod tests { #[test] fn test_is_writable_index() { - let message = create_test_mapped_message().0; + let message = check_test_loaded_message().0; assert!(message.is_writable_index(0)); 
assert!(!message.is_writable_index(1)); @@ -258,15 +264,15 @@ mod tests { #[test] fn test_is_writable() { - let mut mapped_msg = create_test_mapped_message().0; + let mut message = check_test_loaded_message().0; - mapped_msg.message.account_keys[0] = sysvar::clock::id(); - assert!(mapped_msg.is_writable_index(0)); - assert!(!mapped_msg.is_writable(0, /*demote_program_write_locks=*/ true)); + message.message.account_keys[0] = sysvar::clock::id(); + assert!(message.is_writable_index(0)); + assert!(!message.is_writable(0)); - mapped_msg.message.account_keys[0] = system_program::id(); - assert!(mapped_msg.is_writable_index(0)); - assert!(!mapped_msg.is_writable(0, /*demote_program_write_locks=*/ true)); + message.message.account_keys[0] = system_program::id(); + assert!(message.is_writable_index(0)); + assert!(!message.is_writable(0)); } #[test] @@ -274,7 +280,7 @@ mod tests { let key0 = Pubkey::new_unique(); let key1 = Pubkey::new_unique(); let key2 = Pubkey::new_unique(); - let mapped_msg = MappedMessage { + let message = LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 1, @@ -289,13 +295,13 @@ mod tests { }], ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![key1, key2], readonly: vec![], }, }; - assert!(mapped_msg.is_writable_index(2)); - assert!(!mapped_msg.is_writable(2, /*demote_program_write_locks=*/ true)); + assert!(message.is_writable_index(2)); + assert!(!message.is_writable(2)); } } diff --git a/sdk/program/src/message/versions/v0/mod.rs b/sdk/program/src/message/versions/v0/mod.rs new file mode 100644 index 00000000000000..ac0e59919971b2 --- /dev/null +++ b/sdk/program/src/message/versions/v0/mod.rs @@ -0,0 +1,374 @@ +use crate::{ + hash::Hash, + instruction::CompiledInstruction, + message::{MessageHeader, MESSAGE_VERSION_PREFIX}, + pubkey::Pubkey, + sanitize::{Sanitize, SanitizeError}, + short_vec, +}; + +mod loaded; + +pub use loaded::*; + +/// 
Address table lookups describe an on-chain address lookup table to use +/// for loading more readonly and writable accounts in a single tx. +#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] +#[serde(rename_all = "camelCase")] +pub struct MessageAddressTableLookup { + /// Address lookup table account key + pub account_key: Pubkey, + /// List of indexes used to load writable account addresses + #[serde(with = "short_vec")] + pub writable_indexes: Vec, + /// List of indexes used to load readonly account addresses + #[serde(with = "short_vec")] + pub readonly_indexes: Vec, +} + +/// Transaction message format which supports succinct account loading with +/// on-chain address lookup tables. +#[derive(Serialize, Deserialize, Default, Debug, PartialEq, Eq, Clone, AbiExample)] +#[serde(rename_all = "camelCase")] +pub struct Message { + /// The message header, identifying signed and read-only `account_keys` + pub header: MessageHeader, + + /// List of accounts loaded by this transaction. + #[serde(with = "short_vec")] + pub account_keys: Vec, + + /// The blockhash of a recent block. + pub recent_blockhash: Hash, + + /// Instructions that invoke a designated program, are executed in sequence, + /// and committed in one atomic transaction if all succeed. + /// + /// # Notes + /// + /// Account and program indexes will index into the list of addresses + /// constructed from the concatenation of three key lists: + /// 1) message `account_keys` + /// 2) ordered list of keys loaded from `writable` lookup table indexes + /// 3) ordered list of keys loaded from `readable` lookup table indexes + #[serde(with = "short_vec")] + pub instructions: Vec, + + /// List of address table lookups used to load additional accounts + /// for this transaction. 
+ #[serde(with = "short_vec")] + pub address_table_lookups: Vec, +} + +impl Sanitize for Message { + fn sanitize(&self) -> Result<(), SanitizeError> { + // signing area and read-only non-signing area should not + // overlap + if usize::from(self.header.num_required_signatures) + .saturating_add(usize::from(self.header.num_readonly_unsigned_accounts)) + > self.account_keys.len() + { + return Err(SanitizeError::IndexOutOfBounds); + } + + // there should be at least 1 RW fee-payer account. + if self.header.num_readonly_signed_accounts >= self.header.num_required_signatures { + return Err(SanitizeError::InvalidValue); + } + + let mut num_loaded_accounts = self.account_keys.len(); + for lookup in &self.address_table_lookups { + let num_table_loaded_accounts = lookup + .writable_indexes + .len() + .saturating_add(lookup.readonly_indexes.len()); + + // each lookup table must be used to load at least one account + if num_table_loaded_accounts == 0 { + return Err(SanitizeError::InvalidValue); + } + + num_loaded_accounts = num_loaded_accounts.saturating_add(num_table_loaded_accounts); + } + + // the number of loaded accounts must be <= 256 since account indices are + // encoded as `u8` + if num_loaded_accounts > 256 { + return Err(SanitizeError::IndexOutOfBounds); + } + + for ci in &self.instructions { + if usize::from(ci.program_id_index) >= num_loaded_accounts { + return Err(SanitizeError::IndexOutOfBounds); + } + // A program cannot be a payer. + if ci.program_id_index == 0 { + return Err(SanitizeError::IndexOutOfBounds); + } + for ai in &ci.accounts { + if usize::from(*ai) >= num_loaded_accounts { + return Err(SanitizeError::IndexOutOfBounds); + } + } + } + + Ok(()) + } +} + +impl Message { + /// Serialize this message with a version #0 prefix using bincode encoding. 
+ pub fn serialize(&self) -> Vec { + bincode::serialize(&(MESSAGE_VERSION_PREFIX, self)).unwrap() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::message::VersionedMessage, + }; + + #[test] + fn test_sanitize() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_instruction() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique(), Pubkey::new_unique()], + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![0], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_table_lookup() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![0], + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_table_lookup_and_ix() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![1, 2, 3], + readonly_indexes: vec![0], + }], + instructions: vec![CompiledInstruction { + program_id_index: 4, + accounts: vec![0, 1, 2, 3], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_without_signer() { + assert!(Message { + header: MessageHeader::default(), + account_keys: vec![Pubkey::new_unique()], + ..Message::default() + } + 
.sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_without_writable_signer() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + num_readonly_signed_accounts: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_empty_table_lookup() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![], + readonly_indexes: vec![], + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + + + #[test] + fn test_sanitize_with_max_account_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: (0..=u8::MAX).map(|_| Pubkey::new_unique()).collect(), + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_too_many_account_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: (0..=256).map(|_| Pubkey::new_unique()).collect(), + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_max_table_loaded_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..=254).step_by(2).collect(), + readonly_indexes: (1..=254).step_by(2).collect(), + }], + ..Message::default() + } + .sanitize() + .is_ok()); + } + + #[test] + fn test_sanitize_with_too_many_table_loaded_keys() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + 
..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: (0..=255).step_by(2).collect(), + readonly_indexes: (1..=255).step_by(2).collect(), + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_invalid_ix_program_id() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![0], + readonly_indexes: vec![], + }], + instructions: vec![CompiledInstruction { + program_id_index: 2, + accounts: vec![], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + + #[test] + fn test_sanitize_with_invalid_ix_account() { + assert!(Message { + header: MessageHeader { + num_required_signatures: 1, + ..MessageHeader::default() + }, + account_keys: vec![Pubkey::new_unique()], + address_table_lookups: vec![MessageAddressTableLookup { + account_key: Pubkey::new_unique(), + writable_indexes: vec![], + readonly_indexes: vec![0], + }], + instructions: vec![CompiledInstruction { + program_id_index: 1, + accounts: vec![2], + data: vec![] + }], + ..Message::default() + } + .sanitize() + .is_err()); + } + #[test] + fn test_serialize() { + let message = Message::default(); + let versioned_msg = VersionedMessage::V0(message.clone()); + assert_eq!(message.serialize(), versioned_msg.serialize()); + } +} diff --git a/sdk/program/src/program.rs b/sdk/program/src/program.rs index 45873e2d64a748..f07099a0f5245b 100644 --- a/sdk/program/src/program.rs +++ b/sdk/program/src/program.rs @@ -7,8 +7,6 @@ use crate::{ /// Notes: /// - RefCell checking can be compute unit expensive, to avoid that expense use /// `invoke_unchecked` instead, but at your own risk. 
-/// - The program id of the instruction being issued must also be included in -/// `account_infos`. pub fn invoke(instruction: &Instruction, account_infos: &[AccountInfo]) -> ProgramResult { invoke_signed(instruction, account_infos, &[]) } @@ -19,8 +17,6 @@ pub fn invoke(instruction: &Instruction, account_infos: &[AccountInfo]) -> Progr /// - The missing checks ensured that the invocation doesn't violate the borrow /// rules of the `AccountInfo` fields that are wrapped in `RefCell`s. To /// include the checks call `invoke` instead. -/// - The program id of the instruction being issued must also be included in -/// `account_infos`. pub fn invoke_unchecked(instruction: &Instruction, account_infos: &[AccountInfo]) -> ProgramResult { invoke_signed_unchecked(instruction, account_infos, &[]) } @@ -30,8 +26,6 @@ pub fn invoke_unchecked(instruction: &Instruction, account_infos: &[AccountInfo] /// Notes: /// - RefCell checking can be compute unit expensive, to avoid that expense use /// `invoke_signed_unchecked` instead, but at your own risk. -/// - The program id of the instruction being issued must also be included in -/// `account_infos`. pub fn invoke_signed( instruction: &Instruction, account_infos: &[AccountInfo], @@ -63,8 +57,6 @@ pub fn invoke_signed( /// - The missing checks ensured that the invocation doesn't violate the borrow /// rules of the `AccountInfo` fields that are wrapped in `RefCell`s. To /// include the checks call `invoke_signed` instead. -/// - The program id of the instruction being issued must also be included in -/// `account_infos`. 
pub fn invoke_signed_unchecked( instruction: &Instruction, account_infos: &[AccountInfo], diff --git a/sdk/program/src/pubkey.rs b/sdk/program/src/pubkey.rs index 2222f8a6f25f7a..35a6a25eb36ece 100644 --- a/sdk/program/src/pubkey.rs +++ b/sdk/program/src/pubkey.rs @@ -1,6 +1,6 @@ #![allow(clippy::integer_arithmetic)] use { - crate::{decode_error::DecodeError, hash::hashv}, + crate::{decode_error::DecodeError, hash::hashv, wasm_bindgen}, borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, bytemuck::{Pod, Zeroable}, num_derive::{FromPrimitive, ToPrimitive}, @@ -48,6 +48,7 @@ impl From for PubkeyError { } } +#[wasm_bindgen] #[repr(transparent)] #[derive( AbiExample, @@ -67,7 +68,7 @@ impl From for PubkeyError { Serialize, Zeroable, )] -pub struct Pubkey([u8; 32]); +pub struct Pubkey(pub(crate) [u8; 32]); impl crate::sanitize::Sanitize for Pubkey {} @@ -179,91 +180,32 @@ impl Pubkey { )) } - /// Create a program address + /// Find a valid [program derived address][pda] and its corresponding bump seed. /// - /// Program addresses are account keys that only the program has the - /// authority to sign. The address is of the same form as a Solana - /// `Pubkey`, except they are ensured to not be on the ed25519 curve and - /// thus have no associated private key. When performing cross-program - /// invocations the program can "sign" for the key by calling - /// `invoke_signed` and passing the same seeds used to generate the address. - /// The runtime will check that indeed the program associated with this - /// address is the caller and thus authorized to be the signer. + /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses /// - /// Because the program address cannot lie on the ed25519 curve there may be - /// seed and program id combinations that are invalid. In these cases an - /// extra seed (bump seed) can be calculated that results in a point off the - /// curve. 
Use `find_program_address` to calculate that bump seed. + /// Program derived addresses (PDAs) are account keys that only the program, + /// `program_id`, has the authority to sign. The address is of the same form + /// as a Solana `Pubkey`, except they are ensured to not be on the ed25519 + /// curve and thus have no associated private key. When performing + /// cross-program invocations the program can "sign" for the key by calling + /// [`invoke_signed`] and passing the same seeds used to generate the + /// address, along with the calculated _bump seed_, which this function + /// returns as the second tuple element. The runtime will verify that the + /// program associated with this address is the caller and thus authorized + /// to be the signer. /// - /// Warning: Because of the way the seeds are hashed there is a potential - /// for program address collisions for the same program id. The seeds are - /// hashed sequentially which means that seeds {"abcdef"}, {"abc", "def"}, - /// and {"ab", "cd", "ef"} will all result in the same program address given - /// the same program id. Since the change of collision is local to a given - /// program id the developer of that program must take care to choose seeds - /// that do not collide with themselves. 
- pub fn create_program_address( - seeds: &[&[u8]], - program_id: &Pubkey, - ) -> Result { - if seeds.len() > MAX_SEEDS { - return Err(PubkeyError::MaxSeedLengthExceeded); - } - for seed in seeds.iter() { - if seed.len() > MAX_SEED_LEN { - return Err(PubkeyError::MaxSeedLengthExceeded); - } - } - - // Perform the calculation inline, calling this from within a program is - // not supported - #[cfg(not(target_arch = "bpf"))] - { - let mut hasher = crate::hash::Hasher::default(); - for seed in seeds.iter() { - hasher.hash(seed); - } - hasher.hashv(&[program_id.as_ref(), PDA_MARKER]); - let hash = hasher.result(); - - if bytes_are_curve_point(hash) { - return Err(PubkeyError::InvalidSeeds); - } - - Ok(Pubkey::new(hash.as_ref())) - } - // Call via a system call to perform the calculation - #[cfg(target_arch = "bpf")] - { - extern "C" { - fn sol_create_program_address( - seeds_addr: *const u8, - seeds_len: u64, - program_id_addr: *const u8, - address_bytes_addr: *const u8, - ) -> u64; - } - let mut bytes = [0; 32]; - let result = unsafe { - sol_create_program_address( - seeds as *const _ as *const u8, - seeds.len() as u64, - program_id as *const _ as *const u8, - &mut bytes as *mut _ as *mut u8, - ) - }; - match result { - crate::entrypoint::SUCCESS => Ok(Pubkey::new(&bytes)), - _ => Err(result.into()), - } - } - } - - /// Find a valid program address and its corresponding bump seed which must - /// be passed as an additional seed when calling `invoke_signed`. + /// [`invoke_signed`]: crate::program::invoke_signed + /// + /// The `seeds` are application-specific, and must be carefully selected to + /// uniquely derive accounts per application requirements. It is common to + /// use static strings and other pubkeys as seeds. /// - /// Panics in the very unlikely event that the additional seed could not be - /// found. + /// Because the program address must not lie on the ed25519 curve, there may + /// be seed and program id combinations that are invalid. 
For this reason, + /// an extra seed (the bump seed) is calculated that results in a + /// point off the curve. The bump seed must be passed as an additional seed + /// when calling `invoke_signed`. /// /// The processes of finding a valid program address is by trial and error, /// and even though it is deterministic given a set of inputs it can take a @@ -271,28 +213,228 @@ impl Pubkey { /// that when called from an on-chain program it may incur a variable amount /// of the program's compute budget. Programs that are meant to be very /// performant may not want to use this function because it could take a - /// considerable amount of time. Also, programs that area already at risk - /// of exceeding their compute budget should also call this with care since + /// considerable amount of time. Programs that are already at risk + /// of exceeding their compute budget should call this with care since /// there is a chance that the program's budget may be occasionally - /// exceeded. + /// and unpredictably exceeded. + /// + /// As all account addresses accessed by an on-chain Solana program must be + /// explicitly passed to the program, it is typical for the PDAs to be + /// derived in off-chain client programs, avoiding the compute cost of + /// generating the address on-chain. The address may or may not then be + /// verified by re-deriving it on-chain, depending on the requirements of + /// the program. This verification may be performed without the overhead of + /// re-searching for the bump key by using the [`create_program_address`] + /// function. + /// + /// [`create_program_address`]: Pubkey::create_program_address + /// + /// **Warning**: Because of the way the seeds are hashed there is a potential + /// for program address collisions for the same program id. The seeds are + /// hashed sequentially which means that seeds {"abcdef"}, {"abc", "def"}, + /// and {"ab", "cd", "ef"} will all result in the same program address given + /// the same program id. 
Since the chance of collision is local to a given + /// program id, the developer of that program must take care to choose seeds + /// that do not collide with each other. For seed schemes that are susceptible + /// to this type of hash collision, a common remedy is to insert separators + /// between seeds, e.g. transforming {"abc", "def"} into {"abc", "-", "def"}. + /// + /// # Panics + /// + /// Panics in the statistically improbable event that a bump seed could not be + /// found. Use [`try_find_program_address`] to handle this case. + /// + /// [`try_find_program_address`]: Pubkey::try_find_program_address + /// + /// Panics if any of the following are true: + /// + /// - the number of provided seeds is greater than, _or equal to_, [`MAX_SEEDS`], + /// - any individual seed's length is greater than [`MAX_SEED_LEN`]. + /// + /// # Examples + /// + /// This example illustrates a simple case of creating a "vault" account + /// which is derived from the payer account, but owned by an on-chain + /// program. The program derived address is derived in an off-chain client + /// program, which invokes an on-chain Solana program that uses the address + /// to create a new account owned and controlled by the program itself. + /// + /// By convention, the on-chain program will be compiled for use in two + /// different contexts: both on-chain, to interpret a custom program + /// instruction as a Solana transaction; and off-chain, as a library, so + /// that clients can share the instruction data structure, constructors, and + /// other common code. + /// + /// First the on-chain Solana program: + /// + /// ``` + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # use solana_program::{ + /// # pubkey::Pubkey, + /// # entrypoint::ProgramResult, + /// # program::invoke_signed, + /// # system_instruction, + /// # account_info::{ + /// # AccountInfo, + /// # next_account_info, + /// # }, + /// # }; + /// // The custom instruction processed by our program. 
It includes the + /// // PDA's bump seed, which is derived by the client program. This + /// // definition is also imported into the off-chain client program. + /// // The computed address of the PDA will be passed to this program via + /// // the `accounts` vector of the `Instruction` type. + /// #[derive(BorshSerialize, BorshDeserialize, Debug)] + /// pub struct InstructionData { + /// pub vault_bump_seed: u8, + /// pub lamports: u64, + /// } + /// + /// // The size in bytes of a vault account. The client program needs + /// // this information to calculate the quantity of lamports necessary + /// // to pay for the account's rent. + /// pub static VAULT_ACCOUNT_SIZE: u64 = 1024; + /// + /// // The entrypoint of the on-chain program, as provided to the + /// // `entrypoint!` macro. + /// fn process_instruction( + /// program_id: &Pubkey, + /// accounts: &[AccountInfo], + /// instruction_data: &[u8], + /// ) -> ProgramResult { + /// let account_info_iter = &mut accounts.iter(); + /// let payer = next_account_info(account_info_iter)?; + /// // The vault PDA, derived from the payer's address + /// let vault = next_account_info(account_info_iter)?; + /// + /// let mut instruction_data = instruction_data; + /// let instr = InstructionData::deserialize(&mut instruction_data)?; + /// let vault_bump_seed = instr.vault_bump_seed; + /// let lamports = instr.lamports; + /// let vault_size = VAULT_ACCOUNT_SIZE; + /// + /// // Invoke the system program to create an account while virtually + /// // signing with the vault PDA, which is owned by this caller program. 
+ /// invoke_signed( + /// &system_instruction::create_account( + /// &payer.key, + /// &vault.key, + /// lamports, + /// vault_size, + /// &program_id, + /// ), + /// &[ + /// payer.clone(), + /// vault.clone(), + /// ], + /// // A slice of seed slices, each seed slice being the set + /// // of seeds used to generate one of the PDAs required by the + /// // callee program, the final seed being a single-element slice + /// // containing the `u8` bump seed. + /// &[ + /// &[ + /// b"vault", + /// payer.key.as_ref(), + /// &[vault_bump_seed], + /// ], + /// ] + /// )?; + /// + /// Ok(()) + /// } + /// ``` + /// + /// The client program: + /// + /// ```ignore + /// # // NB: This example depends on solana_sdk and solana_client, and adding + /// # // those as dev-dependencies would create an unpublishable circular + /// # // dependency, hence it is ignored. + /// # + /// # use borsh::{BorshSerialize, BorshDeserialize}; + /// # use solana_program::pubkey::Pubkey; + /// # use solana_program::instruction::Instruction; + /// # use solana_program::hash::Hash; + /// # use solana_program::instruction::AccountMeta; + /// # use solana_program::system_program; + /// # use solana_sdk::signature::Keypair; + /// # use solana_sdk::signature::{Signer, Signature}; + /// # use solana_sdk::transaction::Transaction; + /// # use solana_client::rpc_client::RpcClient; + /// # use std::convert::TryFrom; + /// # + /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] + /// # struct InstructionData { + /// # pub vault_bump_seed: u8, + /// # pub lamports: u64, + /// # } + /// # + /// # pub static VAULT_ACCOUNT_SIZE: u64 = 1024; + /// # let program_id = Pubkey::new_unique(); + /// # let payer = Keypair::new(); + /// # let rpc_client = RpcClient::new("no-run".to_string()); + /// # + /// // Derive the PDA from the payer account, a string representing the unique + /// // purpose of the account ("vault"), and the address of our on-chain program. 
+ /// let (vault_pubkey, vault_bump_seed) = Pubkey::find_program_address( + /// &[b"vault", payer.pubkey().as_ref()], + /// &program_id + /// ); + /// + /// // Get the amount of lamports needed to pay for the vault's rent + /// let vault_account_size = usize::try_from(VAULT_ACCOUNT_SIZE)?; + /// let lamports = rpc_client.get_minimum_balance_for_rent_exemption(vault_account_size)?; + /// + /// // The on-chain program's instruction data, imported from that program's crate. + /// let instr_data = InstructionData { + /// vault_bump_seed, + /// lamports, + /// }; + /// + /// // The accounts required by both our on-chain program and the system program's + /// // `create_account` instruction, including the vault's address. + /// let accounts = vec![ + /// AccountMeta::new(payer.pubkey(), true), + /// AccountMeta::new(vault_pubkey, false), + /// AccountMeta::new(system_program::ID, false), + /// ]; + /// + /// // Create the instruction by serializing our instruction data via borsh + /// let instruction = Instruction::new_with_borsh( + /// program_id, + /// &instr_data, + /// accounts, + /// ); + /// + /// let blockhash = rpc_client.get_latest_blockhash()?; + /// + /// let transaction = Transaction::new_signed_with_payer( + /// &[instruction], + /// Some(&payer.pubkey()), + /// &[&payer], + /// blockhash, + /// ); + /// + /// rpc_client.send_and_confirm_transaction(&transaction)?; + /// # Ok::<(), anyhow::Error>(()) + /// ``` pub fn find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> (Pubkey, u8) { Self::try_find_program_address(seeds, program_id) .unwrap_or_else(|| panic!("Unable to find a viable program address bump seed")) } - /// Find a valid program address and its corresponding bump seed which must - /// be passed as an additional seed when calling `invoke_signed`. + /// Find a valid [program derived address][pda] and its corresponding bump seed. 
/// - /// The processes of finding a valid program address is by trial and error, - /// and even though it is deterministic given a set of inputs it can take a - /// variable amount of time to succeed across different inputs. This means - /// that when called from an on-chain program it may incur a variable amount - /// of the program's compute budget. Programs that are meant to be very - /// performant may not want to use this function because it could take a - /// considerable amount of time. Also, programs that area already at risk - /// of exceeding their compute budget should also call this with care since - /// there is a chance that the program's budget may be occasionally - /// exceeded. + /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses + /// + /// The only difference between this method and [`find_program_address`] + /// is that this one returns `None` in the statistically improbable event + /// that a bump seed cannot be found; or if any of `find_program_address`'s + /// preconditions are violated. + /// + /// See the documentation for [`find_program_address`] for a full description. + /// + /// [`find_program_address`]: Pubkey::find_program_address #[allow(clippy::same_item_push)] pub fn try_find_program_address(seeds: &[&[u8]], program_id: &Pubkey) -> Option<(Pubkey, u8)> { // Perform the calculation inline, calling this from within a program is @@ -344,6 +486,105 @@ impl Pubkey { } } + /// Create a valid [program derived address][pda] without searching for a bump seed. + /// + /// [pda]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses + /// + /// Because this function does not create a bump seed, it may unpredictably + /// return an error for any given set of seeds and is not generally suitable + /// for creating program derived addresses. 
+ /// + /// However, it can be used for efficiently verifying that a set of seeds plus + /// bump seed generated by [`find_program_address`] derives a particular + /// address as expected. See the example for details. + /// + /// See the documentation for [`find_program_address`] for a full description + /// of program derived addresses and bump seeds. + /// + /// [`find_program_address`]: Pubkey::find_program_address + /// + /// # Examples + /// + /// Creating a program derived address involves iteratively searching for a + /// bump seed for which the derived [`Pubkey`] does not lie on the ed25519 + /// curve. This search process is generally performed off-chain, with the + /// [`find_program_address`] function, after which the client passes the + /// bump seed to the program as instruction data. + /// + /// Depending on the application requirements, a program may wish to verify + /// that the set of seeds, plus the bump seed, do correctly generate an + /// expected address. + /// + /// The verification is performed by appending to the other seeds one + /// additional seed slice that contains the single `u8` bump seed, calling + /// `create_program_address`, checking that the return value is `Ok`, and + /// that the returned `Pubkey` has the expected value. 
+ /// + /// ``` + /// # use solana_program::pubkey::Pubkey; + /// # let program_id = Pubkey::new_unique(); + /// let (expected_pda, bump_seed) = Pubkey::find_program_address(&[b"vault"], &program_id); + /// let actual_pda = Pubkey::create_program_address(&[b"vault", &[bump_seed]], &program_id)?; + /// assert_eq!(expected_pda, actual_pda); + /// # Ok::<(), anyhow::Error>(()) + /// ``` + pub fn create_program_address( + seeds: &[&[u8]], + program_id: &Pubkey, + ) -> Result { + if seeds.len() > MAX_SEEDS { + return Err(PubkeyError::MaxSeedLengthExceeded); + } + for seed in seeds.iter() { + if seed.len() > MAX_SEED_LEN { + return Err(PubkeyError::MaxSeedLengthExceeded); + } + } + + // Perform the calculation inline, calling this from within a program is + // not supported + #[cfg(not(target_arch = "bpf"))] + { + let mut hasher = crate::hash::Hasher::default(); + for seed in seeds.iter() { + hasher.hash(seed); + } + hasher.hashv(&[program_id.as_ref(), PDA_MARKER]); + let hash = hasher.result(); + + if bytes_are_curve_point(hash) { + return Err(PubkeyError::InvalidSeeds); + } + + Ok(Pubkey::new(hash.as_ref())) + } + // Call via a system call to perform the calculation + #[cfg(target_arch = "bpf")] + { + extern "C" { + fn sol_create_program_address( + seeds_addr: *const u8, + seeds_len: u64, + program_id_addr: *const u8, + address_bytes_addr: *const u8, + ) -> u64; + } + let mut bytes = [0; 32]; + let result = unsafe { + sol_create_program_address( + seeds as *const _ as *const u8, + seeds.len() as u64, + program_id as *const _ as *const u8, + &mut bytes as *mut _ as *mut u8, + ) + }; + match result { + crate::entrypoint::SUCCESS => Ok(Pubkey::new(&bytes)), + _ => Err(result.into()), + } + } + } + pub fn to_bytes(self) -> [u8; 32] { self.0 } diff --git a/sdk/program/src/slot_hashes.rs b/sdk/program/src/slot_hashes.rs index 1b1e65d8ebefa9..ae9efd7c5d9dbc 100644 --- a/sdk/program/src/slot_hashes.rs +++ b/sdk/program/src/slot_hashes.rs @@ -25,6 +25,9 @@ impl SlotHashes { } 
(self.0).truncate(MAX_ENTRIES); } + pub fn position(&self, slot: &Slot) -> Option { + self.binary_search_by(|(probe, _)| slot.cmp(probe)).ok() + } #[allow(clippy::trivially_copy_pass_by_ref)] pub fn get(&self, slot: &Slot) -> Option<&Hash> { self.binary_search_by(|(probe, _)| slot.cmp(probe)) diff --git a/sdk/program/src/system_instruction.rs b/sdk/program/src/system_instruction.rs index 59c6255146671c..1ae02021d9bd26 100644 --- a/sdk/program/src/system_instruction.rs +++ b/sdk/program/src/system_instruction.rs @@ -1,5 +1,4 @@ #[allow(deprecated)] -use crate::sysvar::recent_blockhashes; use { crate::{ decode_error::DecodeError, @@ -7,7 +6,7 @@ use { nonce, pubkey::Pubkey, system_program, - sysvar::rent, + sysvar::{recent_blockhashes, rent}, }, num_derive::{FromPrimitive, ToPrimitive}, thiserror::Error, diff --git a/sdk/program/src/sysvar/instructions.rs b/sdk/program/src/sysvar/instructions.rs index bd50d87fb7af76..db2ba248f740b4 100644 --- a/sdk/program/src/sysvar/instructions.rs +++ b/sdk/program/src/sysvar/instructions.rs @@ -13,11 +13,8 @@ crate::declare_sysvar_id!("Sysvar1nstructions1111111111111111111111111", Instruc // Construct the account data for the Instruction sSysvar #[cfg(not(target_arch = "bpf"))] -pub fn construct_instructions_data( - message: &crate::message::SanitizedMessage, - demote_program_write_locks: bool, -) -> Vec { - let mut data = message.serialize_instructions(demote_program_write_locks); +pub fn construct_instructions_data(message: &crate::message::SanitizedMessage) -> Vec { + let mut data = message.serialize_instructions(); // add room for current instruction index. 
data.resize(data.len() + 2, 0); @@ -154,7 +151,7 @@ mod tests { let key = id(); let mut lamports = 0; - let mut data = construct_instructions_data(&sanitized_message, true); + let mut data = construct_instructions_data(&sanitized_message); let owner = crate::sysvar::id(); let mut account_info = AccountInfo::new( &key, @@ -208,7 +205,7 @@ mod tests { let key = id(); let mut lamports = 0; - let mut data = construct_instructions_data(&sanitized_message, true); + let mut data = construct_instructions_data(&sanitized_message); store_current_index(&mut data, 1); let owner = crate::sysvar::id(); let mut account_info = AccountInfo::new( @@ -266,7 +263,7 @@ mod tests { let key = id(); let mut lamports = 0; - let mut data = construct_instructions_data(&sanitized_message, true); + let mut data = construct_instructions_data(&sanitized_message); store_current_index(&mut data, 1); let owner = crate::sysvar::id(); let mut account_info = AccountInfo::new( diff --git a/sdk/program/src/wasm/hash.rs b/sdk/program/src/wasm/hash.rs new file mode 100644 index 00000000000000..add1e6bbe80657 --- /dev/null +++ b/sdk/program/src/wasm/hash.rs @@ -0,0 +1,57 @@ +//! 
`Hash` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{hash::*, wasm::display_to_jsvalue}, + js_sys::{Array, Uint8Array}, + wasm_bindgen::{prelude::*, JsCast}, +}; + +#[wasm_bindgen] +impl Hash { + /// Create a new Hash object + /// + /// * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result { + if let Some(base58_str) = value.as_string() { + base58_str.parse::().map_err(display_to_jsvalue) + } else if let Some(uint8_array) = value.dyn_ref::() { + Ok(Hash::new(&uint8_array.to_vec())) + } else if let Some(array) = value.dyn_ref::() { + let mut bytes = vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if n >= 0. && n <= 255. { + bytes.push(n as u8); + continue; + } + } + return Err(format!("Invalid array argument: {:?}", x).into()); + } + Ok(Hash::new(&bytes)) + } else if value.is_undefined() { + Ok(Hash::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base58 string representation of the hash + pub fn toString(&self) -> String { + self.to_string() + } + + /// Checks if two `Hash`s are equal + pub fn equals(&self, other: &Hash) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the hash + pub fn toBytes(&self) -> Box<[u8]> { + self.0.clone().into() + } +} diff --git a/sdk/program/src/wasm/instructions.rs b/sdk/program/src/wasm/instructions.rs new file mode 100644 index 00000000000000..36abe05c6f4d8d --- /dev/null +++ b/sdk/program/src/wasm/instructions.rs @@ -0,0 +1,28 @@ +//! The `Instructions` struct is a workaround for the lack of Vec support in wasm-bindgen +//! 
(ref: https://github.com/rustwasm/wasm-bindgen/issues/111) +#![cfg(target_arch = "wasm32")] +use {crate::instruction::Instruction, wasm_bindgen::prelude::*}; + +#[wasm_bindgen] +#[derive(Default)] +pub struct Instructions { + instructions: Vec, +} + +#[wasm_bindgen] +impl Instructions { + #[wasm_bindgen(constructor)] + pub fn constructor() -> Instructions { + Instructions::default() + } + + pub fn push(&mut self, instruction: Instruction) { + self.instructions.push(instruction); + } +} + +impl From for Vec { + fn from(instructions: Instructions) -> Self { + instructions.instructions + } +} diff --git a/sdk/program/src/wasm/mod.rs b/sdk/program/src/wasm/mod.rs new file mode 100644 index 00000000000000..801142b487f10e --- /dev/null +++ b/sdk/program/src/wasm/mod.rs @@ -0,0 +1,24 @@ +//! solana-program Javascript interface +#![cfg(target_arch = "wasm32")] +use wasm_bindgen::prelude::*; + +pub mod hash; +pub mod instructions; +pub mod pubkey; +pub mod system_instruction; + +/// Initialize Javascript logging and panic handler +#[wasm_bindgen] +pub fn init() { + use std::sync::Once; + static INIT: Once = Once::new(); + + INIT.call_once(|| { + std::panic::set_hook(Box::new(console_error_panic_hook::hook)); + console_log::init_with_level(log::Level::Info).unwrap(); + }); +} + +pub fn display_to_jsvalue(display: T) -> JsValue { + display.to_string().into() +} diff --git a/sdk/program/src/wasm/pubkey.rs b/sdk/program/src/wasm/pubkey.rs new file mode 100644 index 00000000000000..a3aa27941927eb --- /dev/null +++ b/sdk/program/src/wasm/pubkey.rs @@ -0,0 +1,121 @@ +//! 
`Pubkey` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{pubkey::*, wasm::display_to_jsvalue}, + js_sys::{Array, Uint8Array}, + wasm_bindgen::{prelude::*, JsCast}, +}; + +fn js_value_to_seeds_vec(array_of_uint8_arrays: &[JsValue]) -> Result>, JsValue> { + let vec_vec_u8 = array_of_uint8_arrays + .iter() + .filter_map(|u8_array| { + u8_array + .dyn_ref::() + .map(|u8_array| u8_array.to_vec()) + }) + .collect::>(); + + if vec_vec_u8.len() != array_of_uint8_arrays.len() { + Err("Invalid Array of Uint8Arrays".into()) + } else { + Ok(vec_vec_u8) + } +} + +#[wasm_bindgen] +impl Pubkey { + /// Create a new Pubkey object + /// + /// * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]` + #[wasm_bindgen(constructor)] + pub fn constructor(value: JsValue) -> Result { + if let Some(base58_str) = value.as_string() { + base58_str.parse::().map_err(display_to_jsvalue) + } else if let Some(uint8_array) = value.dyn_ref::() { + Ok(Pubkey::new(&uint8_array.to_vec())) + } else if let Some(array) = value.dyn_ref::() { + let mut bytes = vec![]; + let iterator = js_sys::try_iter(&array.values())?.expect("array to be iterable"); + for x in iterator { + let x = x?; + + if let Some(n) = x.as_f64() { + if n >= 0. && n <= 255. { + bytes.push(n as u8); + continue; + } + } + return Err(format!("Invalid array argument: {:?}", x).into()); + } + Ok(Pubkey::new(&bytes)) + } else if value.is_undefined() { + Ok(Pubkey::default()) + } else { + Err("Unsupported argument".into()) + } + } + + /// Return the base58 string representation of the public key + pub fn toString(&self) -> String { + self.to_string() + } + + /// Check if a `Pubkey` is on the ed25519 curve. 
+ pub fn isOnCurve(&self) -> bool { + self.is_on_curve() + } + + /// Checks if two `Pubkey`s are equal + pub fn equals(&self, other: &Pubkey) -> bool { + self == other + } + + /// Return the `Uint8Array` representation of the public key + pub fn toBytes(&self) -> Box<[u8]> { + self.0.clone().into() + } + + /// Derive a Pubkey from another Pubkey, string seed, and a program id + pub fn createWithSeed(base: &Pubkey, seed: &str, owner: &Pubkey) -> Result { + Pubkey::create_with_seed(base, seed, owner).map_err(display_to_jsvalue) + } + + /// Derive a program address from seeds and a program id + pub fn createProgramAddress( + seeds: Box<[JsValue]>, + program_id: &Pubkey, + ) -> Result { + let seeds_vec = js_value_to_seeds_vec(&seeds)?; + let seeds_slice = seeds_vec + .iter() + .map(|seed| seed.as_slice()) + .collect::>(); + + Pubkey::create_program_address(seeds_slice.as_slice(), program_id) + .map_err(display_to_jsvalue) + } + + /// Find a valid program address + /// + /// Returns: + /// * `[PubKey, number]` - the program address and bump seed + pub fn findProgramAddress( + seeds: Box<[JsValue]>, + program_id: &Pubkey, + ) -> Result { + let seeds_vec = js_value_to_seeds_vec(&seeds)?; + let seeds_slice = seeds_vec + .iter() + .map(|seed| seed.as_slice()) + .collect::>(); + + let (address, bump_seed) = Pubkey::find_program_address(seeds_slice.as_slice(), program_id); + + let result = Array::new_with_length(2); + result.set(0, address.into()); + result.set(1, bump_seed.into()); + Ok(result.into()) + } +} diff --git a/sdk/program/src/wasm/system_instruction.rs b/sdk/program/src/wasm/system_instruction.rs new file mode 100644 index 00000000000000..94dd636788092c --- /dev/null +++ b/sdk/program/src/wasm/system_instruction.rs @@ -0,0 +1,112 @@ +//! 
`SystemInstruction` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{instruction::Instruction, pubkey::Pubkey, system_instruction::*}, + wasm_bindgen::prelude::*, +}; + +#[wasm_bindgen] +impl SystemInstruction { + pub fn createAccount( + from_pubkey: &Pubkey, + to_pubkey: &Pubkey, + lamports: u64, + space: u64, + owner: &Pubkey, + ) -> Instruction { + create_account(from_pubkey, to_pubkey, lamports, space, owner) + } + + pub fn createAccountWithSeed( + from_pubkey: &Pubkey, + to_pubkey: &Pubkey, + base: &Pubkey, + seed: &str, + lamports: u64, + space: u64, + owner: &Pubkey, + ) -> Instruction { + create_account_with_seed(from_pubkey, to_pubkey, base, seed, lamports, space, owner) + } + + pub fn assign(pubkey: &Pubkey, owner: &Pubkey) -> Instruction { + assign(pubkey, owner) + } + + pub fn assignWithSeed( + pubkey: &Pubkey, + base: &Pubkey, + seed: &str, + owner: &Pubkey, + ) -> Instruction { + assign_with_seed(pubkey, base, seed, owner) + } + + pub fn transfer(from_pubkey: &Pubkey, to_pubkey: &Pubkey, lamports: u64) -> Instruction { + transfer(from_pubkey, to_pubkey, lamports) + } + + pub fn transferWithSeed( + from_pubkey: &Pubkey, + from_base: &Pubkey, + from_seed: String, + from_owner: &Pubkey, + to_pubkey: &Pubkey, + lamports: u64, + ) -> Instruction { + transfer_with_seed( + from_pubkey, + from_base, + from_seed, + from_owner, + to_pubkey, + lamports, + ) + } + + pub fn allocate(pubkey: &Pubkey, space: u64) -> Instruction { + allocate(pubkey, space) + } + + pub fn allocateWithSeed( + address: &Pubkey, + base: &Pubkey, + seed: &str, + space: u64, + owner: &Pubkey, + ) -> Instruction { + allocate_with_seed(address, base, seed, space, owner) + } + + pub fn createNonceAccount( + from_pubkey: &Pubkey, + nonce_pubkey: &Pubkey, + authority: &Pubkey, + lamports: u64, + ) -> js_sys::Array { + let instructions = create_nonce_account(from_pubkey, nonce_pubkey, authority, lamports); + 
instructions.into_iter().map(JsValue::from).collect() + } + + pub fn advanceNonceAccount(nonce_pubkey: &Pubkey, authorized_pubkey: &Pubkey) -> Instruction { + advance_nonce_account(nonce_pubkey, authorized_pubkey) + } + + pub fn withdrawNonceAccount( + nonce_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + to_pubkey: &Pubkey, + lamports: u64, + ) -> Instruction { + withdraw_nonce_account(nonce_pubkey, authorized_pubkey, to_pubkey, lamports) + } + + pub fn authorizeNonceAccount( + nonce_pubkey: &Pubkey, + authorized_pubkey: &Pubkey, + new_authority: &Pubkey, + ) -> Instruction { + authorize_nonce_account(nonce_pubkey, authorized_pubkey, new_authority) + } +} diff --git a/sdk/program/tests/hash.mjs b/sdk/program/tests/hash.mjs new file mode 100644 index 00000000000000..4b25857a49d727 --- /dev/null +++ b/sdk/program/tests/hash.mjs @@ -0,0 +1,81 @@ +import { expect } from "chai"; +import { init, Hash } from "crate"; +init(); + +// TODO: wasm_bindgen doesn't currently support exporting constants +const HASH_BYTES = 32; + +describe("Hash", function () { + it("invalid", () => { + expect(() => { + new Hash([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Hash([ + 'invalid', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Hash( + "0x300000000000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Hash( + "0x300000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Hash( + "135693854574979916511997248057056142015550763280047535983739356259273198796800000" + ); + }).to.throw(); + + expect(() => { + new Hash("12345"); + }).to.throw(); + }); + + it("toString", () => { + const key = new Hash("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + 
expect(key.toString()).to.eq("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + + const key2 = new Hash("1111111111111111111111111111BukQL"); + expect(key2.toString()).to.eq("1111111111111111111111111111BukQL"); + + const key3 = new Hash("11111111111111111111111111111111"); + expect(key3.toString()).to.eq("11111111111111111111111111111111"); + + const key4 = new Hash([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + expect(key4.toString()).to.eq("11111111111111111111111111111111"); + }); + + it("toBytes", () => { + const key = new Hash("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toBytes()).to.deep.equal( + new Uint8Array([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + + const key2 = new Hash(); + expect(key2.toBytes()).to.deep.equal( + new Uint8Array([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + }); +}); diff --git a/sdk/program/tests/pubkey.mjs b/sdk/program/tests/pubkey.mjs new file mode 100644 index 00000000000000..67ee73ba3de0c6 --- /dev/null +++ b/sdk/program/tests/pubkey.mjs @@ -0,0 +1,185 @@ +import { expect } from "chai"; +import { init, Pubkey } from "crate"; +init(); + +// TODO: wasm_bindgen doesn't currently support exporting constants +const MAX_SEED_LEN = 32; + +describe("Pubkey", function () { + it("invalid", () => { + expect(() => { + new Pubkey([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Pubkey([ + 'invalid', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + }).to.throw(); + + expect(() => { + new Pubkey( + "0x300000000000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Pubkey( + 
"0x300000000000000000000000000000000000000000000000000000000000000" + ); + }).to.throw(); + + expect(() => { + new Pubkey( + "135693854574979916511997248057056142015550763280047535983739356259273198796800000" + ); + }).to.throw(); + + expect(() => { + new Pubkey("12345"); + }).to.throw(); + }); + + it("toString", () => { + const key = new Pubkey("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toString()).to.eq("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + + const key2 = new Pubkey("1111111111111111111111111111BukQL"); + expect(key2.toString()).to.eq("1111111111111111111111111111BukQL"); + + const key3 = new Pubkey("11111111111111111111111111111111"); + expect(key3.toString()).to.eq("11111111111111111111111111111111"); + + const key4 = new Pubkey([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + expect(key4.toString()).to.eq("11111111111111111111111111111111"); + }); + + it("toBytes", () => { + const key = new Pubkey("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + expect(key.toBytes()).to.deep.equal( + new Uint8Array([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + + const key2 = new Pubkey(); + expect(key2.toBytes()).to.deep.equal( + new Uint8Array([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + ]) + ); + }); + + it("isOnCurve", () => { + let onCurve = new Pubkey("J4NYrSRccTUGXP7wmFwiByakqWKZb5RwpiAoskpgAQRb"); + expect(onCurve.isOnCurve()).to.be.true; + + let offCurve = new Pubkey("12rqwuEgBYiGhBrDJStCiqEtzQpTTiZbh7teNVLuYcFA"); + expect(offCurve.isOnCurve()).to.be.false; + }); + + it("equals", () => { + const arrayKey = new Pubkey([ + 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ]); + const base58Key = new Pubkey("CiDwVBFgWV9E5MvXWoLgnEgn2hK7rJikbvfWavzAQz3"); + + expect(arrayKey.equals(base58Key)).to.be.true; + 
}); + + it("createWithSeed", async () => { + const defaultPublicKey = new Pubkey("11111111111111111111111111111111"); + const derivedKey = Pubkey.createWithSeed( + defaultPublicKey, + "limber chicken: 4/45", + defaultPublicKey + ); + + expect( + derivedKey.equals( + new Pubkey("9h1HyLCW5dZnBVap8C5egQ9Z6pHyjsh5MNy83iPqqRuq") + ) + ).to.be.true; + }); + + it("createProgramAddress", async () => { + const programId = new Pubkey("BPFLoader1111111111111111111111111111111111"); + const publicKey = new Pubkey("SeedPubey1111111111111111111111111111111111"); + + let programAddress = Pubkey.createProgramAddress( + [Buffer.from("", "utf8"), Buffer.from([1])], + programId + ); + expect( + programAddress.equals( + new Pubkey("3gF2KMe9KiC6FNVBmfg9i267aMPvK37FewCip4eGBFcT") + ) + ).to.be.true; + + programAddress = Pubkey.createProgramAddress( + [Buffer.from("☉", "utf8")], + programId + ); + expect( + programAddress.equals( + new Pubkey("7ytmC1nT1xY4RfxCV2ZgyA7UakC93do5ZdyhdF3EtPj7") + ) + ).to.be.true; + + programAddress = Pubkey.createProgramAddress( + [Buffer.from("Talking", "utf8"), Buffer.from("Squirrels", "utf8")], + programId + ); + expect( + programAddress.equals( + new Pubkey("HwRVBufQ4haG5XSgpspwKtNd3PC9GM9m1196uJW36vds") + ) + ).to.be.true; + + programAddress = Pubkey.createProgramAddress( + [publicKey.toBytes()], + programId + ); + expect( + programAddress.equals( + new Pubkey("GUs5qLUfsEHkcMB9T38vjr18ypEhRuNWiePW2LoK4E3K") + ) + ).to.be.true; + + const programAddress2 = Pubkey.createProgramAddress( + [Buffer.from("Talking", "utf8")], + programId + ); + expect(programAddress.equals(programAddress2)).to.eq(false); + + expect(() => { + Pubkey.createProgramAddress([Buffer.alloc(MAX_SEED_LEN + 1)], programId); + }).to.throw(); + }); + + it("findProgramAddress", async () => { + const programId = new Pubkey("BPFLoader1111111111111111111111111111111111"); + let [programAddress, nonce] = Pubkey.findProgramAddress( + [Buffer.from("", "utf8")], + programId + ); + expect( + 
programAddress.equals( + Pubkey.createProgramAddress( + [Buffer.from("", "utf8"), Buffer.from([nonce])], + programId + ) + ) + ).to.be.true; + }); +}); diff --git a/sdk/src/account.rs b/sdk/src/account.rs index ca19f91a857437..2e8e2fc34ad216 100644 --- a/sdk/src/account.rs +++ b/sdk/src/account.rs @@ -103,6 +103,7 @@ pub trait WritableAccount: ReadableAccount { ); Ok(()) } + fn data_mut(&mut self) -> &mut Vec; fn data_as_mut_slice(&mut self) -> &mut [u8]; fn set_owner(&mut self, owner: Pubkey); fn copy_into_owner_from_slice(&mut self, source: &[u8]); @@ -156,6 +157,9 @@ impl WritableAccount for Account { fn set_lamports(&mut self, lamports: u64) { self.lamports = lamports; } + fn data_mut(&mut self) -> &mut Vec { + &mut self.data + } fn data_as_mut_slice(&mut self) -> &mut [u8] { &mut self.data } @@ -192,9 +196,11 @@ impl WritableAccount for AccountSharedData { fn set_lamports(&mut self, lamports: u64) { self.lamports = lamports; } + fn data_mut(&mut self) -> &mut Vec { + Arc::make_mut(&mut self.data) + } fn data_as_mut_slice(&mut self) -> &mut [u8] { - let data = Arc::make_mut(&mut self.data); - &mut data[..] + &mut self.data_mut()[..] 
} fn set_owner(&mut self, owner: Pubkey) { self.owner = owner; diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index cef24b5b6faae5..e4b185ba6958cb 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -182,11 +182,11 @@ pub mod demote_program_write_locks { } pub mod ed25519_program_enabled { - solana_sdk::declare_id!("E1TvTNipX8TKNHrhRC8SMuAwQmGY58TZ4drdztP3Gxwc"); + solana_sdk::declare_id!("6ppMXNYLhVd7GcsZ5uV11wQEW7spppiMVfqQv5SXhDpX"); } pub mod return_data_syscall_enabled { - solana_sdk::declare_id!("BJVXq6NdLC7jCDGjfqJv7M1XHD4Y13VrpDqRF2U7UBcC"); + solana_sdk::declare_id!("DwScAzPUjuv65TMbDnFY7AgwmotzWy3xpEJMXM3hZFaB"); } pub mod reduce_required_deploy_balance { @@ -194,7 +194,7 @@ pub mod reduce_required_deploy_balance { } pub mod sol_log_data_syscall_enabled { - solana_sdk::declare_id!("HYPs7jyJ3KwQFdDpuSzMtVKf1MLJDaZRv3CSWvfUqdFo"); + solana_sdk::declare_id!("6uaHcKPGUy4J7emLBgUTeufhJdiwhngW6a1R9B7c2ob9"); } pub mod stakes_remove_delegation_if_inactive { @@ -263,6 +263,22 @@ pub mod reject_empty_instruction_without_program { solana_sdk::declare_id!("9kdtFSrXHQg3hKkbXkQ6trJ3Ja1xpJ22CTFSNAciEwmL"); } +pub mod reject_non_rent_exempt_vote_withdraws { + solana_sdk::declare_id!("7txXZZD6Um59YoLMF7XUNimbMjsqsWhc7g2EniiTrmp1"); +} + +pub mod evict_invalid_stakes_cache_entries { + solana_sdk::declare_id!("EMX9Q7TVFAmQ9V1CggAkhMzhXSg8ECp7fHrWQX2G1chf"); +} + +pub mod reject_all_elf_rw { + solana_sdk::declare_id!("DeMpxgMq51j3rZfNK2hQKZyXknQvqevPSFPJFNTbXxsS"); +} + +pub mod max_tx_account_locks { + solana_sdk::declare_id!("CBkDroRDqm8HwHe6ak9cguPjUomrASEkfmxEaZ5CNNxz"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap = [ @@ -323,6 +339,10 @@ lazy_static! 
{ (spl_token_v3_3_0_release::id(), "spl-token v3.3.0 release"), (leave_nonce_on_success::id(), "leave nonce as is on success"), (reject_empty_instruction_without_program::id(), "fail instructions which have native_loader as program_id directly"), + (reject_non_rent_exempt_vote_withdraws::id(), "fail vote withdraw instructions which leave the account non-rent-exempt"), + (evict_invalid_stakes_cache_entries::id(), "evict invalid stakes cache entries on epoch boundaries"), + (reject_all_elf_rw::id(), "reject all read-write data in program elfs"), + (max_tx_account_locks::id(), "enforce max number of locked accounts per transaction"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index af9aa182034775..2df94726a17601 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -47,6 +47,7 @@ pub mod system_transaction; pub mod timing; pub mod transaction; pub mod transport; +pub mod wasm; /// Same as `declare_id` except report that this id has been deprecated pub use solana_sdk_macro::declare_deprecated_id; diff --git a/sdk/src/log.rs b/sdk/src/log.rs index 4cc45cf413cc0c..78a45afaf4a1e8 100644 --- a/sdk/src/log.rs +++ b/sdk/src/log.rs @@ -11,13 +11,4 @@ macro_rules! 
info { ($msg:expr) => { $crate::log::sol_log($msg) }; - ($arg1:expr, $arg2:expr, $arg3:expr, $arg4:expr, $arg5:expr) => { - $crate::log::sol_log_64( - $arg1 as u64, - $arg2 as u64, - $arg3 as u64, - $arg4 as u64, - $arg5 as u64, - ) - }; } diff --git a/sdk/src/packet.rs b/sdk/src/packet.rs index 27435b3ae677fb..efea21904364c4 100644 --- a/sdk/src/packet.rs +++ b/sdk/src/packet.rs @@ -1,10 +1,10 @@ use { - crate::clock::Slot, bincode::Result, + bitflags::bitflags, serde::Serialize, std::{ fmt, io, - net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, + net::{IpAddr, Ipv4Addr, SocketAddr}, }, }; @@ -14,20 +14,24 @@ use { /// 8 bytes is the size of the fragment header pub const PACKET_DATA_SIZE: usize = 1280 - 40 - 8; -#[derive(Clone, Default, Debug, PartialEq)] +bitflags! { + #[repr(C)] + pub struct PacketFlags: u8 { + const DISCARD = 0b00000001; + const FORWARDED = 0b00000010; + const REPAIR = 0b00000100; + const SIMPLE_VOTE_TX = 0b00001000; + const TRACER_TX = 0b00010000; + } +} + +#[derive(Clone, Debug, PartialEq)] #[repr(C)] pub struct Meta { pub size: usize, - pub forward: bool, - pub repair: bool, - pub discard: bool, - pub addr: [u16; 8], + pub addr: IpAddr, pub port: u16, - pub v6: bool, - pub seed: [u8; 32], - pub slot: Slot, - pub is_tracer_tx: bool, - pub is_simple_vote_tx: bool, + pub flags: PacketFlags, } #[derive(Clone)] @@ -95,40 +99,52 @@ impl PartialEq for Packet { impl Meta { pub fn addr(&self) -> SocketAddr { - if !self.v6 { - let addr = [ - self.addr[0] as u8, - self.addr[1] as u8, - self.addr[2] as u8, - self.addr[3] as u8, - ]; - let ipv4: Ipv4Addr = From::<[u8; 4]>::from(addr); - SocketAddr::new(IpAddr::V4(ipv4), self.port) - } else { - let ipv6: Ipv6Addr = From::<[u16; 8]>::from(self.addr); - SocketAddr::new(IpAddr::V6(ipv6), self.port) - } + SocketAddr::new(self.addr, self.port) + } + + pub fn set_addr(&mut self, socket_addr: &SocketAddr) { + self.addr = socket_addr.ip(); + self.port = socket_addr.port(); + } + + #[inline] + pub fn discard(&self) 
-> bool { + self.flags.contains(PacketFlags::DISCARD) + } + + #[inline] + pub fn set_discard(&mut self, discard: bool) { + self.flags.set(PacketFlags::DISCARD, discard); } - pub fn set_addr(&mut self, a: &SocketAddr) { - match *a { - SocketAddr::V4(v4) => { - let ip = v4.ip().octets(); - self.addr[0] = u16::from(ip[0]); - self.addr[1] = u16::from(ip[1]); - self.addr[2] = u16::from(ip[2]); - self.addr[3] = u16::from(ip[3]); - self.addr[4] = 0; - self.addr[5] = 0; - self.addr[6] = 0; - self.addr[7] = 0; - self.v6 = false; - } - SocketAddr::V6(v6) => { - self.addr = v6.ip().segments(); - self.v6 = true; - } + #[inline] + pub fn forwarded(&self) -> bool { + self.flags.contains(PacketFlags::FORWARDED) + } + + #[inline] + pub fn repair(&self) -> bool { + self.flags.contains(PacketFlags::REPAIR) + } + + #[inline] + pub fn is_simple_vote_tx(&self) -> bool { + self.flags.contains(PacketFlags::SIMPLE_VOTE_TX) + } + + #[inline] + pub fn is_tracer_tx(&self) -> bool { + self.flags.contains(PacketFlags::TRACER_TX) + } +} + +impl Default for Meta { + fn default() -> Self { + Self { + size: 0, + addr: IpAddr::V4(Ipv4Addr::UNSPECIFIED), + port: 0, + flags: PacketFlags::empty(), } - self.port = a.port(); } } diff --git a/sdk/src/signer/keypair.rs b/sdk/src/signer/keypair.rs index 3a80cf308e1fe4..63283d403e6eb6 100644 --- a/sdk/src/signer/keypair.rs +++ b/sdk/src/signer/keypair.rs @@ -17,9 +17,11 @@ use { io::{Read, Write}, path::Path, }, + wasm_bindgen::prelude::*, }; /// A vanilla Ed25519 key pair +#[wasm_bindgen] #[derive(Debug)] pub struct Keypair(ed25519_dalek::Keypair); diff --git a/sdk/src/transaction/error.rs b/sdk/src/transaction/error.rs new file mode 100644 index 00000000000000..075635c21d24c3 --- /dev/null +++ b/sdk/src/transaction/error.rs @@ -0,0 +1,124 @@ +use { + crate::{ + instruction::InstructionError, message::SanitizeMessageError, sanitize::SanitizeError, + }, + serde::Serialize, + thiserror::Error, +}; + +/// Reasons a transaction might be rejected. 
+#[derive( + Error, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample, AbiEnumVisitor, +)] +pub enum TransactionError { + /// An account is already being processed in another transaction in a way + /// that does not support parallelism + #[error("Account in use")] + AccountInUse, + + /// A `Pubkey` appears twice in the transaction's `account_keys`. Instructions can reference + /// `Pubkey`s more than once but the message must contain a list with no duplicate keys + #[error("Account loaded twice")] + AccountLoadedTwice, + + /// Attempt to debit an account but found no record of a prior credit. + #[error("Attempt to debit an account but found no record of a prior credit.")] + AccountNotFound, + + /// Attempt to load a program that does not exist + #[error("Attempt to load a program that does not exist")] + ProgramAccountNotFound, + + /// The from `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction + #[error("Insufficient funds for fee")] + InsufficientFundsForFee, + + /// This account may not be used to pay transaction fees + #[error("This account may not be used to pay transaction fees")] + InvalidAccountForFee, + + /// The bank has seen this transaction before. This can occur under normal operation + /// when a UDP packet is duplicated, as a user error from a client not updating + /// its `recent_blockhash`, or as a double-spend attack. + #[error("This transaction has already been processed")] + AlreadyProcessed, + + /// The bank has not seen the given `recent_blockhash` or the transaction is too old and + /// the `recent_blockhash` has been discarded. + #[error("Blockhash not found")] + BlockhashNotFound, + + /// An error occurred while processing an instruction. The first element of the tuple + /// indicates the instruction index in which the error occurred. 
+ #[error("Error processing Instruction {0}: {1}")] + InstructionError(u8, InstructionError), + + /// Loader call chain is too deep + #[error("Loader call chain is too deep")] + CallChainTooDeep, + + /// Transaction requires a fee but has no signature present + #[error("Transaction requires a fee but has no signature present")] + MissingSignatureForFee, + + /// Transaction contains an invalid account reference + #[error("Transaction contains an invalid account reference")] + InvalidAccountIndex, + + /// Transaction did not pass signature verification + #[error("Transaction did not pass signature verification")] + SignatureFailure, + + /// This program may not be used for executing instructions + #[error("This program may not be used for executing instructions")] + InvalidProgramForExecution, + + /// Transaction failed to sanitize accounts offsets correctly + /// implies that account locks are not taken for this TX, and should + /// not be unlocked. + #[error("Transaction failed to sanitize accounts offsets correctly")] + SanitizeFailure, + + #[error("Transactions are currently disabled due to cluster maintenance")] + ClusterMaintenance, + + /// Transaction processing left an account with an outstanding borrowed reference + #[error("Transaction processing left an account with an outstanding borrowed reference")] + AccountBorrowOutstanding, + + /// Transaction would exceed max Block Cost Limit + #[error("Transaction would exceed max Block Cost Limit")] + WouldExceedMaxBlockCostLimit, + + /// Transaction version is unsupported + #[error("Transaction version is unsupported")] + UnsupportedVersion, + + /// Transaction loads a writable account that cannot be written + #[error("Transaction loads a writable account that cannot be written")] + InvalidWritableAccount, + + /// Transaction would exceed max account limit within the block + #[error("Transaction would exceed max account limit within the block")] + WouldExceedMaxAccountCostLimit, + + /// Transaction would exceed 
max account data limit within the block + #[error("Transaction would exceed max account data limit within the block")] + WouldExceedMaxAccountDataCostLimit, + + /// Transaction locked too many accounts + #[error("Transaction locked too many accounts")] + TooManyAccountLocks, +} + +impl From for TransactionError { + fn from(_: SanitizeError) -> Self { + Self::SanitizeFailure + } +} + +impl From for TransactionError { + fn from(_err: SanitizeMessageError) -> Self { + Self::SanitizeFailure + } +} diff --git a/sdk/src/transaction/mod.rs b/sdk/src/transaction/mod.rs index 5ed77660eed76e..c75aa6c15dd5af 100644 --- a/sdk/src/transaction/mod.rs +++ b/sdk/src/transaction/mod.rs @@ -5,8 +5,8 @@ use { crate::{ hash::Hash, - instruction::{CompiledInstruction, Instruction, InstructionError}, - message::{Message, SanitizeMessageError}, + instruction::{CompiledInstruction, Instruction}, + message::Message, nonce::NONCED_TX_MARKER_IX_INDEX, precompiles::verify_if_precompile, program_utils::limited_deserialize, @@ -15,115 +15,19 @@ use { short_vec, signature::{Signature, SignerError}, signers::Signers, + wasm_bindgen, }, serde::Serialize, solana_program::{system_instruction::SystemInstruction, system_program}, solana_sdk::feature_set, std::{result, sync::Arc}, - thiserror::Error, }; +mod error; mod sanitized; mod versioned; -pub use {sanitized::*, versioned::*}; - -/// Reasons a transaction might be rejected. -#[derive( - Error, Serialize, Deserialize, Debug, PartialEq, Eq, Clone, AbiExample, AbiEnumVisitor, -)] -pub enum TransactionError { - /// An account is already being processed in another transaction in a way - /// that does not support parallelism - #[error("Account in use")] - AccountInUse, - - /// A `Pubkey` appears twice in the transaction's `account_keys`. 
Instructions can reference - /// `Pubkey`s more than once but the message must contain a list with no duplicate keys - #[error("Account loaded twice")] - AccountLoadedTwice, - - /// Attempt to debit an account but found no record of a prior credit. - #[error("Attempt to debit an account but found no record of a prior credit.")] - AccountNotFound, - - /// Attempt to load a program that does not exist - #[error("Attempt to load a program that does not exist")] - ProgramAccountNotFound, - - /// The from `Pubkey` does not have sufficient balance to pay the fee to schedule the transaction - #[error("Insufficient funds for fee")] - InsufficientFundsForFee, - - /// This account may not be used to pay transaction fees - #[error("This account may not be used to pay transaction fees")] - InvalidAccountForFee, - - /// The bank has seen this transaction before. This can occur under normal operation - /// when a UDP packet is duplicated, as a user error from a client not updating - /// its `recent_blockhash`, or as a double-spend attack. - #[error("This transaction has already been processed")] - AlreadyProcessed, - - /// The bank has not seen the given `recent_blockhash` or the transaction is too old and - /// the `recent_blockhash` has been discarded. - #[error("Blockhash not found")] - BlockhashNotFound, - - /// An error occurred while processing an instruction. The first element of the tuple - /// indicates the instruction index in which the error occurred. 
- #[error("Error processing Instruction {0}: {1}")] - InstructionError(u8, InstructionError), - - /// Loader call chain is too deep - #[error("Loader call chain is too deep")] - CallChainTooDeep, - - /// Transaction requires a fee but has no signature present - #[error("Transaction requires a fee but has no signature present")] - MissingSignatureForFee, - - /// Transaction contains an invalid account reference - #[error("Transaction contains an invalid account reference")] - InvalidAccountIndex, - - /// Transaction did not pass signature verification - #[error("Transaction did not pass signature verification")] - SignatureFailure, - - /// This program may not be used for executing instructions - #[error("This program may not be used for executing instructions")] - InvalidProgramForExecution, - - /// Transaction failed to sanitize accounts offsets correctly - /// implies that account locks are not taken for this TX, and should - /// not be unlocked. - #[error("Transaction failed to sanitize accounts offsets correctly")] - SanitizeFailure, - - #[error("Transactions are currently disabled due to cluster maintenance")] - ClusterMaintenance, - - /// Transaction processing left an account with an outstanding borrowed reference - #[error("Transaction processing left an account with an outstanding borrowed reference")] - AccountBorrowOutstanding, - - /// Transaction would exceed max Block Cost Limit - #[error("Transaction would exceed max Block Cost Limit")] - WouldExceedMaxBlockCostLimit, - - /// Transaction version is unsupported - #[error("Transaction version is unsupported")] - UnsupportedVersion, - - /// Transaction loads a writable account that cannot be written - #[error("Transaction loads a writable account that cannot be written")] - InvalidWritableAccount, - - /// Transaction would exceed max account limit within the block - #[error("Transaction would exceed max account limit within the block")] - WouldExceedMaxAccountCostLimit, -} +pub use {error::*, 
sanitized::*, versioned::*}; #[derive(PartialEq, Clone, Copy, Debug)] pub enum TransactionVerificationMode { @@ -134,24 +38,8 @@ pub enum TransactionVerificationMode { pub type Result = result::Result; -impl From for TransactionError { - fn from(_: SanitizeError) -> Self { - Self::SanitizeFailure - } -} - -impl From for TransactionError { - fn from(err: SanitizeMessageError) -> Self { - match err { - SanitizeMessageError::IndexOutOfBounds - | SanitizeMessageError::ValueOutOfBounds - | SanitizeMessageError::InvalidValue => Self::SanitizeFailure, - SanitizeMessageError::DuplicateAccountKey => Self::AccountLoadedTwice, - } - } -} - /// An atomic transaction +#[wasm_bindgen] #[frozen_abi(digest = "FZtncnS1Xk8ghHfKiXE5oGiUbw2wJhmfXQuNgQR3K6Mc")] #[derive(Debug, PartialEq, Default, Eq, Clone, Serialize, Deserialize, AbiExample)] pub struct Transaction { @@ -161,10 +49,12 @@ pub struct Transaction { /// [`account_keys`]: Message::account_keys /// // NOTE: Serialization-related changes must be paired with the direct read at sigverify. + #[wasm_bindgen(skip)] #[serde(with = "short_vec")] pub signatures: Vec, /// The message to sign. 
+ #[wasm_bindgen(skip)] pub message: Message, } @@ -525,7 +415,7 @@ pub fn uses_durable_nonce(tx: &Transaction) -> Option<&CompiledInstruction> { // Nonce account is writable && matches!( instruction.accounts.get(0), - Some(index) if message.is_writable(*index as usize, true) + Some(index) if message.is_writable(*index as usize) ) }) } diff --git a/sdk/src/transaction/sanitized.rs b/sdk/src/transaction/sanitized.rs index 2768e665da6238..ac0b1671805788 100644 --- a/sdk/src/transaction/sanitized.rs +++ b/sdk/src/transaction/sanitized.rs @@ -3,7 +3,10 @@ use { crate::{ hash::Hash, - message::{v0, MappedAddresses, MappedMessage, SanitizedMessage, VersionedMessage}, + message::{ + v0::{self, LoadedAddresses}, + SanitizedMessage, VersionedMessage, + }, nonce::NONCED_TX_MARKER_IX_INDEX, precompiles::verify_if_precompile, program_utils::limited_deserialize, @@ -17,6 +20,11 @@ use { std::sync::Arc, }; +/// Maximum number of accounts that a transaction may lock. +/// 64 was chosen because it is roughly twice the previous +/// number of account keys that could fit in a legacy tx. +pub const MAX_TX_ACCOUNT_LOCKS: usize = 64; + /// Sanitized transaction and the hash of its message #[derive(Debug, Clone)] pub struct SanitizedTransaction { @@ -37,29 +45,25 @@ pub struct TransactionAccountLocks<'a> { impl SanitizedTransaction { /// Create a sanitized transaction from an unsanitized transaction. - /// If the input transaction uses address maps, attempt to map the - /// transaction keys to full addresses. + /// If the input transaction uses address tables, attempt to lookup + /// the address for each table index. 
pub fn try_create( tx: VersionedTransaction, message_hash: Hash, is_simple_vote_tx: Option, - address_mapper: impl Fn(&v0::Message) -> Result, + address_loader: impl Fn(&v0::Message) -> Result, ) -> Result { tx.sanitize()?; let signatures = tx.signatures; let message = match tx.message { VersionedMessage::Legacy(message) => SanitizedMessage::Legacy(message), - VersionedMessage::V0(message) => SanitizedMessage::V0(MappedMessage { - mapped_addresses: address_mapper(&message)?, + VersionedMessage::V0(message) => SanitizedMessage::V0(v0::LoadedMessage { + loaded_addresses: address_loader(&message)?, message, }), }; - if message.has_duplicates() { - return Err(TransactionError::AccountLoadedTwice); - } - let is_simple_vote_tx = is_simple_vote_tx.unwrap_or_else(|| { let mut ix_iter = message.program_instructions_iter(); ix_iter.next().map(|(program_id, _ix)| program_id) == Some(&crate::vote::program::id()) @@ -73,20 +77,20 @@ impl SanitizedTransaction { }) } - /// Create a sanitized transaction from a legacy transaction. Used for tests only. - pub fn from_transaction_for_tests(tx: Transaction) -> Self { - tx.sanitize().unwrap(); - - if tx.message.has_duplicates() { - Result::::Err(TransactionError::AccountLoadedTwice).unwrap(); - } + pub fn try_from_legacy_transaction(tx: Transaction) -> Result { + tx.sanitize()?; - Self { + Ok(Self { message_hash: tx.message.hash(), message: SanitizedMessage::Legacy(tx.message), is_simple_vote_tx: false, signatures: tx.signatures, - } + }) + } + + /// Create a sanitized transaction from a legacy transaction. Used for tests only. + pub fn from_transaction_for_tests(tx: Transaction) -> Self { + Self::try_from_legacy_transaction(tx).unwrap() } /// Return the first signature for this transaction. 
@@ -125,9 +129,9 @@ impl SanitizedTransaction { pub fn to_versioned_transaction(&self) -> VersionedTransaction { let signatures = self.signatures.clone(); match &self.message { - SanitizedMessage::V0(mapped_msg) => VersionedTransaction { + SanitizedMessage::V0(sanitized_msg) => VersionedTransaction { signatures, - message: VersionedMessage::V0(mapped_msg.message.clone()), + message: VersionedMessage::V0(sanitized_msg.message.clone()), }, SanitizedMessage::Legacy(message) => VersionedTransaction { signatures, @@ -136,8 +140,24 @@ impl SanitizedTransaction { } } + /// Validate and return the account keys locked by this transaction + pub fn get_account_locks( + &self, + feature_set: &feature_set::FeatureSet, + ) -> Result { + if self.message.has_duplicates() { + Err(TransactionError::AccountLoadedTwice) + } else if feature_set.is_active(&feature_set::max_tx_account_locks::id()) + && self.message.account_keys_len() > MAX_TX_ACCOUNT_LOCKS + { + Err(TransactionError::TooManyAccountLocks) + } else { + Ok(self.get_account_locks_unchecked()) + } + } + /// Return the list of accounts that must be locked during processing this transaction. 
- pub fn get_account_locks(&self, demote_program_write_locks: bool) -> TransactionAccountLocks { + pub fn get_account_locks_unchecked(&self) -> TransactionAccountLocks { let message = &self.message; let num_readonly_accounts = message.num_readonly_accounts(); let num_writable_accounts = message @@ -150,7 +170,7 @@ impl SanitizedTransaction { }; for (i, key) in message.account_keys_iter().enumerate() { - if message.is_writable(i, demote_program_write_locks) { + if message.is_writable(i) { account_locks.writable.push(key); } else { account_locks.readonly.push(key); @@ -180,7 +200,7 @@ impl SanitizedTransaction { .and_then(|ix| { ix.accounts.get(0).and_then(|idx| { let idx = *idx as usize; - if nonce_must_be_writable && !self.message.is_writable(idx, true) { + if nonce_must_be_writable && !self.message.is_writable(idx) { None } else { self.message.get_account_key(idx) @@ -193,7 +213,7 @@ impl SanitizedTransaction { fn message_data(&self) -> Vec { match &self.message { SanitizedMessage::Legacy(message) => message.serialize(), - SanitizedMessage::V0(mapped_msg) => mapped_msg.message.serialize(), + SanitizedMessage::V0(message) => message.serialize(), } } diff --git a/sdk/src/wasm/keypair.rs b/sdk/src/wasm/keypair.rs new file mode 100644 index 00000000000000..6f2ffebbb7ccf5 --- /dev/null +++ b/sdk/src/wasm/keypair.rs @@ -0,0 +1,34 @@ +//! 
`Keypair` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::signer::{keypair::Keypair, Signer}, + solana_program::{pubkey::Pubkey, wasm::display_to_jsvalue}, + wasm_bindgen::prelude::*, +}; + +#[wasm_bindgen] +impl Keypair { + /// Create a new `Keypair ` + #[wasm_bindgen(constructor)] + pub fn constructor() -> Keypair { + Keypair::new() + } + + /// Convert a `Keypair` to a `Uint8Array` + pub fn toBytes(&self) -> Box<[u8]> { + self.to_bytes().into() + } + + /// Recover a `Keypair` from a `Uint8Array` + pub fn fromBytes(bytes: &[u8]) -> Result { + Keypair::from_bytes(bytes).map_err(display_to_jsvalue) + } + + /// Return the `Pubkey` for this `Keypair` + #[wasm_bindgen(js_name = pubkey)] + pub fn js_pubkey(&self) -> Pubkey { + // `wasm_bindgen` does not support traits (`Signer) yet + self.pubkey() + } +} diff --git a/sdk/src/wasm/mod.rs b/sdk/src/wasm/mod.rs new file mode 100644 index 00000000000000..6946e730f79fde --- /dev/null +++ b/sdk/src/wasm/mod.rs @@ -0,0 +1,5 @@ +//! solana-sdk Javascript interface +#![cfg(target_arch = "wasm32")] + +pub mod keypair; +pub mod transaction; diff --git a/sdk/src/wasm/transaction.rs b/sdk/src/wasm/transaction.rs new file mode 100644 index 00000000000000..4b8bc6f82534a6 --- /dev/null +++ b/sdk/src/wasm/transaction.rs @@ -0,0 +1,58 @@ +//! 
`Transaction` Javascript interface +#![cfg(target_arch = "wasm32")] +#![allow(non_snake_case)] +use { + crate::{ + hash::Hash, + signer::keypair::Keypair, + {message::Message, transaction::Transaction}, + }, + solana_program::{ + pubkey::Pubkey, + wasm::{display_to_jsvalue, instructions::Instructions}, + }, + wasm_bindgen::prelude::*, +}; + +#[wasm_bindgen] +impl Transaction { + /// Create a new `Transaction` + #[wasm_bindgen(constructor)] + pub fn constructor(instructions: Instructions, payer: Option) -> Transaction { + let instructions: Vec<_> = instructions.into(); + Transaction::new_with_payer(&instructions, payer.as_ref()) + } + + /// Return a message containing all data that should be signed. + #[wasm_bindgen(js_name = message)] + pub fn js_message(&self) -> Message { + self.message.clone() + } + + /// Return the serialized message data to sign. + pub fn messageData(&self) -> Box<[u8]> { + self.message_data().into() + } + + /// Verify the transaction + #[wasm_bindgen(js_name = verify)] + pub fn js_verify(&self) -> Result<(), JsValue> { + self.verify().map_err(display_to_jsvalue) + } + + pub fn partialSign(&mut self, keypair: &Keypair, recent_blockhash: &Hash) { + self.partial_sign(&[keypair], *recent_blockhash); + } + + pub fn isSigned(&self) -> bool { + self.is_signed() + } + + pub fn toBytes(&self) -> Box<[u8]> { + bincode::serialize(self).unwrap().into() + } + + pub fn fromBytes(bytes: &[u8]) -> Result { + bincode::deserialize(bytes).map_err(display_to_jsvalue) + } +} diff --git a/sdk/tests/keypair.mjs b/sdk/tests/keypair.mjs new file mode 100644 index 00000000000000..092ba511ba0a9f --- /dev/null +++ b/sdk/tests/keypair.mjs @@ -0,0 +1,14 @@ +import { expect } from "chai"; +import { init, Keypair } from "crate"; +init(); + +describe("Keypair", function () { + it("works", () => { + const keypair = new Keypair(); + let bytes = keypair.toBytes(); + expect(bytes).to.have.length(64); + + const recoveredKeypair = Keypair.fromBytes(bytes); + 
expect(keypair.pubkey().equals(recoveredKeypair.pubkey())); + }); +}); diff --git a/sdk/tests/transaction.mjs b/sdk/tests/transaction.mjs new file mode 100644 index 00000000000000..c672b8c460848d --- /dev/null +++ b/sdk/tests/transaction.mjs @@ -0,0 +1,56 @@ +import { expect } from "chai"; +import { + init, + Pubkey, + Keypair, + Hash, + SystemInstruction, + Instructions, + Transaction, +} from "crate"; +init(); + +describe("Transaction", function () { + it("SystemInstruction::Transfer", () => { + const payer = Keypair.fromBytes( + new Uint8Array([ + 241, 230, 222, 64, 184, 48, 232, 92, 156, 210, 229, 183, 154, 251, 5, + 227, 98, 184, 34, 234, 39, 106, 62, 210, 166, 187, 31, 44, 40, 96, 24, + 51, 252, 28, 2, 120, 234, 212, 139, 111, 96, 8, 168, 204, 34, 72, 199, + 205, 117, 165, 82, 51, 32, 93, 211, 36, 239, 245, 139, 218, 99, 211, + 207, 177, + ]) + ); + + const src = Keypair.fromBytes( + new Uint8Array([ + 172, 219, 139, 103, 154, 105, 92, 23, 227, 108, 174, 80, 215, 227, 62, + 8, 66, 38, 151, 239, 148, 184, 180, 148, 149, 18, 106, 94, 73, 143, 27, + 132, 193, 64, 199, 93, 222, 83, 172, 224, 116, 205, 54, 38, 191, 178, + 149, 71, 65, 132, 46, 71, 126, 81, 63, 254, 21, 101, 90, 52, 67, 204, + 128, 199, + ]) + ); + + const dst = new Pubkey("11111111111111111111111111111112"); + + const recent_blockhash = new Hash( + "EETubP5AKHgjPAhzPAFcb8BAY1hMH639CWCFTqi3hq1k" + ); + + let instructions = new Instructions(); + instructions.push( + SystemInstruction.transfer(src.pubkey(), dst, BigInt(123)) + ); + + let transaction = new Transaction(instructions, payer.pubkey()); + transaction.partialSign(payer, recent_blockhash); + transaction.partialSign(src, recent_blockhash); + expect(transaction.isSigned()).to.be.true; + transaction.verify(); + + expect(Buffer.from(transaction.toBytes()).toString("base64")).to.equal( + 
"AoZrVzP93eyp3vbl6CU9XQjQfm4Xp/7nSiBlsX/kJmfTQZsGTOrFnt6EUqHVte97fGZ71UAXDfLbR5B31OtRdgdab57BOU8mq0ztMutZAVBPtGJHVly8RPz4TYa+OFU7EIk3Wrv4WUMCb/NR+LxELLH+tQt5SrkvB7rCE2DniM8JAgABBPwcAnjq1ItvYAiozCJIx811pVIzIF3TJO/1i9pj08+xwUDHXd5TrOB0zTYmv7KVR0GELkd+UT/+FWVaNEPMgMcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAxJrndgN4IFTxep3s6kO0ROug7bEsbx0xxuDkqEvwUusBAwIBAgwCAAAAewAAAAAAAAA=" + ); + }); +}); diff --git a/send-transaction-service/Cargo.toml b/send-transaction-service/Cargo.toml index cc9894497440cb..eeb30312e0b7e1 100644 --- a/send-transaction-service/Cargo.toml +++ b/send-transaction-service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-send-transaction-service" -version = "1.9.0" +version = "1.9.4" description = "Solana send transaction service" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,10 +11,10 @@ edition = "2021" [dependencies] log = "0.4.14" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/stake-accounts/Cargo.toml b/stake-accounts/Cargo.toml index 4c5824f2da1e12..b578965d2762b3 100644 --- a/stake-accounts/Cargo.toml +++ b/stake-accounts/Cargo.toml @@ -3,7 +3,7 @@ name = "solana-stake-accounts" description = "Blockchain, Rebuilt for Scale" authors = ["Solana Maintainers "] edition = "2021" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -11,15 +11,15 @@ documentation 
= "https://docs.rs/solana-stake-accounts" [dependencies] clap = "2.33.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-stake-program = { path = "../programs/stake", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-stake-program = { path = "../programs/stake", version = "=1.9.4" } [dev-dependencies] -solana-runtime = { path = "../runtime", version = "=1.9.0" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/storage-bigtable/Cargo.toml b/storage-bigtable/Cargo.toml index 96cc2705d50625..113db50d289421 100644 --- a/storage-bigtable/Cargo.toml +++ b/storage-bigtable/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-storage-bigtable" -version = "1.9.0" +version = "1.9.4" description = "Solana Storage BigTable" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -22,10 +22,10 @@ prost-types = "0.9.0" serde = "1.0.130" serde_derive = "1.0.103" smpl_jwt = "0.6.1" -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-storage-proto = { path = "../storage-proto", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } 
+solana-storage-proto = { path = "../storage-proto", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } thiserror = "1.0" tonic = { version = "0.6.1", features = ["tls", "transport"] } zstd = "0.9.0" diff --git a/storage-bigtable/build-proto/Cargo.lock b/storage-bigtable/build-proto/Cargo.lock index b6fca911e4002d..38e66581e7d771 100644 --- a/storage-bigtable/build-proto/Cargo.lock +++ b/storage-bigtable/build-proto/Cargo.lock @@ -217,7 +217,7 @@ dependencies = [ [[package]] name = "proto" -version = "1.9.0" +version = "1.9.4" dependencies = [ "tonic-build", ] diff --git a/storage-bigtable/build-proto/Cargo.toml b/storage-bigtable/build-proto/Cargo.toml index 474b5c26d8f8b7..a35ae2143db222 100644 --- a/storage-bigtable/build-proto/Cargo.toml +++ b/storage-bigtable/build-proto/Cargo.toml @@ -7,7 +7,7 @@ license = "Apache-2.0" name = "proto" publish = false repository = "https://github.com/solana-labs/solana" -version = "1.9.0" +version = "1.9.4" [workspace] diff --git a/storage-proto/Cargo.toml b/storage-proto/Cargo.toml index 6f25039959fb74..40d09cce68dd1b 100644 --- a/storage-proto/Cargo.toml +++ b/storage-proto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-storage-proto" -version = "1.9.0" +version = "1.9.4" description = "Solana Storage Protobuf Definitions" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -14,9 +14,9 @@ bincode = "1.3.3" bs58 = "0.4.0" prost = "0.9.0" serde = "1.0.130" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } [dev-dependencies] enum-iterator = "0.7.0" diff --git 
a/storage-proto/proto/transaction_by_addr.proto b/storage-proto/proto/transaction_by_addr.proto index 36c17832ee9b85..19b16518009952 100644 --- a/storage-proto/proto/transaction_by_addr.proto +++ b/storage-proto/proto/transaction_by_addr.proto @@ -45,6 +45,8 @@ enum TransactionErrorType { UNSUPPORTED_VERSION = 18; INVALID_WRITABLE_ACCOUNT = 19; WOULD_EXCEED_MAX_ACCOUNT_COST_LIMIT = 20; + WOULD_EXCEED_MAX_ACCOUNT_DATA_COST_LIMIT = 21; + TOO_MANY_ACCOUNT_LOCKS = 22; } message InstructionError { diff --git a/storage-proto/src/convert.rs b/storage-proto/src/convert.rs index e8511867c6b3a8..6732ee2b6b6c56 100644 --- a/storage-proto/src/convert.rs +++ b/storage-proto/src/convert.rs @@ -567,6 +567,8 @@ impl TryFrom for TransactionError { 18 => TransactionError::UnsupportedVersion, 19 => TransactionError::InvalidWritableAccount, 20 => TransactionError::WouldExceedMaxAccountCostLimit, + 21 => TransactionError::WouldExceedMaxAccountDataCostLimit, + 22 => TransactionError::TooManyAccountLocks, _ => return Err("Invalid TransactionError"), }) } @@ -637,6 +639,12 @@ impl From for tx_by_addr::TransactionError { TransactionError::WouldExceedMaxAccountCostLimit => { tx_by_addr::TransactionErrorType::WouldExceedMaxAccountCostLimit } + TransactionError::WouldExceedMaxAccountDataCostLimit => { + tx_by_addr::TransactionErrorType::WouldExceedMaxAccountDataCostLimit + } + TransactionError::TooManyAccountLocks => { + tx_by_addr::TransactionErrorType::TooManyAccountLocks + } } as i32, instruction_error: match transaction_error { TransactionError::InstructionError(index, ref instruction_error) => { diff --git a/streamer/Cargo.toml b/streamer/Cargo.toml index ac6e9af1afdab7..ebe0d61a812a05 100644 --- a/streamer/Cargo.toml +++ b/streamer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-streamer" -version = "1.9.0" +version = "1.9.4" description = "Solana Streamer" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -12,13 +12,13 @@ edition = "2021" 
[dependencies] itertools = "0.10.1" log = "0.4.14" -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } thiserror = "1.0" -solana-logger = { path = "../logger", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } libc = "0.2.108" nix = "0.23.0" -solana-perf = { path = "../perf", version = "=1.9.0" } +solana-perf = { path = "../perf", version = "=1.9.4" } [dev-dependencies] diff --git a/streamer/src/packet.rs b/streamer/src/packet.rs index 58688ef80ec98a..34404143f1304c 100644 --- a/streamer/src/packet.rs +++ b/streamer/src/packet.rs @@ -9,13 +9,13 @@ use { }; pub use { solana_perf::packet::{ - limited_deserialize, to_packets_chunked, Packets, PacketsRecycler, NUM_PACKETS, + limited_deserialize, to_packet_batches, PacketBatch, PacketBatchRecycler, NUM_PACKETS, PACKETS_PER_BATCH, }, solana_sdk::packet::{Meta, Packet, PACKET_DATA_SIZE}, }; -pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Result { +pub fn recv_from(batch: &mut PacketBatch, socket: &UdpSocket, max_wait_ms: u64) -> Result { let mut i = 0; //DOCUMENTED SIDE-EFFECT //Performance out of the IO without poll @@ -27,11 +27,11 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res trace!("receiving on {}", socket.local_addr().unwrap()); let start = Instant::now(); loop { - obj.packets.resize( + batch.packets.resize( std::cmp::min(i + NUM_RCVMMSGS, PACKETS_PER_BATCH), Packet::default(), ); - match recv_mmsg(socket, &mut obj.packets[i..]) { + match recv_mmsg(socket, &mut batch.packets[i..]) { Err(_) if i > 0 => { if start.elapsed().as_millis() as u64 > max_wait_ms { break; @@ -41,7 +41,7 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res trace!("recv_from err {:?}", e); return Err(e); } - Ok((_, npkts)) => { + Ok(npkts) 
=> { if i == 0 { socket.set_nonblocking(true)?; } @@ -55,17 +55,17 @@ pub fn recv_from(obj: &mut Packets, socket: &UdpSocket, max_wait_ms: u64) -> Res } } } - obj.packets.truncate(i); + batch.packets.truncate(i); inc_new_counter_debug!("packets-recv_count", i); Ok(i) } pub fn send_to( - obj: &Packets, + batch: &PacketBatch, socket: &UdpSocket, socket_addr_space: &SocketAddrSpace, ) -> Result<()> { - for p in &obj.packets { + for p in &batch.packets { let addr = p.meta.addr(); if socket_addr_space.check(&addr) { socket.send_to(&p.data[..p.meta.size], &addr)?; @@ -90,9 +90,9 @@ mod tests { // test that the address is actually being updated let send_addr: SocketAddr = "127.0.0.1:123".parse().unwrap(); let packets = vec![Packet::default()]; - let mut msgs = Packets::new(packets); - msgs.set_addr(&send_addr); - assert_eq!(msgs.packets[0].meta.addr(), send_addr); + let mut packet_batch = PacketBatch::new(packets); + packet_batch.set_addr(&send_addr); + assert_eq!(packet_batch.packets[0].meta.addr(), send_addr); } #[test] @@ -102,21 +102,25 @@ mod tests { let addr = recv_socket.local_addr().unwrap(); let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); let saddr = send_socket.local_addr().unwrap(); - let mut p = Packets::default(); + let mut batch = PacketBatch::default(); - p.packets.resize(10, Packet::default()); + batch.packets.resize(10, Packet::default()); - for m in p.packets.iter_mut() { + for m in batch.packets.iter_mut() { m.meta.set_addr(&addr); m.meta.size = PACKET_DATA_SIZE; } - send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); + send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); - let recvd = recv_from(&mut p, &recv_socket, 1).unwrap(); + batch + .packets + .iter_mut() + .for_each(|pkt| pkt.meta = Meta::default()); + let recvd = recv_from(&mut batch, &recv_socket, 1).unwrap(); - assert_eq!(recvd, p.packets.len()); + assert_eq!(recvd, batch.packets.len()); - for m in &p.packets { + for m in &batch.packets { 
assert_eq!(m.meta.size, PACKET_DATA_SIZE); assert_eq!(m.meta.addr(), saddr); } @@ -125,7 +129,7 @@ mod tests { #[test] pub fn debug_trait() { write!(io::sink(), "{:?}", Packet::default()).unwrap(); - write!(io::sink(), "{:?}", Packets::default()).unwrap(); + write!(io::sink(), "{:?}", PacketBatch::default()).unwrap(); } #[test] @@ -151,25 +155,25 @@ mod tests { let recv_socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); let addr = recv_socket.local_addr().unwrap(); let send_socket = UdpSocket::bind("127.0.0.1:0").expect("bind"); - let mut p = Packets::default(); - p.packets.resize(PACKETS_PER_BATCH, Packet::default()); + let mut batch = PacketBatch::default(); + batch.packets.resize(PACKETS_PER_BATCH, Packet::default()); // Should only get PACKETS_PER_BATCH packets per iteration even // if a lot more were sent, and regardless of packet size for _ in 0..2 * PACKETS_PER_BATCH { - let mut p = Packets::default(); - p.packets.resize(1, Packet::default()); - for m in p.packets.iter_mut() { + let mut batch = PacketBatch::default(); + batch.packets.resize(1, Packet::default()); + for m in batch.packets.iter_mut() { m.meta.set_addr(&addr); m.meta.size = 1; } - send_to(&p, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); + send_to(&batch, &send_socket, &SocketAddrSpace::Unspecified).unwrap(); } - let recvd = recv_from(&mut p, &recv_socket, 100).unwrap(); + let recvd = recv_from(&mut batch, &recv_socket, 100).unwrap(); // Check we only got PACKETS_PER_BATCH packets assert_eq!(recvd, PACKETS_PER_BATCH); - assert_eq!(p.packets.capacity(), PACKETS_PER_BATCH); + assert_eq!(batch.packets.capacity(), PACKETS_PER_BATCH); } } diff --git a/streamer/src/recvmmsg.rs b/streamer/src/recvmmsg.rs index 897ba07990002f..b9713a2b7285ec 100644 --- a/streamer/src/recvmmsg.rs +++ b/streamer/src/recvmmsg.rs @@ -2,7 +2,7 @@ pub use solana_perf::packet::NUM_RCVMMSGS; use { - crate::packet::Packet, + crate::packet::{Meta, Packet}, std::{cmp, io, net::UdpSocket}, }; #[cfg(target_os = 
"linux")] @@ -14,10 +14,10 @@ use { }; #[cfg(not(target_os = "linux"))] -pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usize, usize)> { +pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result { + debug_assert!(packets.iter().all(|pkt| pkt.meta == Meta::default())); let mut i = 0; let count = cmp::min(NUM_RCVMMSGS, packets.len()); - let mut total_size = 0; for p in packets.iter_mut().take(count) { p.meta.size = 0; match socket.recv_from(&mut p.data) { @@ -28,7 +28,6 @@ pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usiz return Err(e); } Ok((nrecv, from)) => { - total_size += nrecv; p.meta.size = nrecv; p.meta.set_addr(&from); if i == 0 { @@ -38,7 +37,7 @@ pub fn recv_mmsg(socket: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usiz } i += 1; } - Ok((total_size, i)) + Ok(i) } #[cfg(target_os = "linux")] @@ -67,7 +66,9 @@ fn cast_socket_addr(addr: &sockaddr_storage, hdr: &mmsghdr) -> Option #[cfg(target_os = "linux")] #[allow(clippy::uninit_assumed_init)] -pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usize, usize)> { +pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result { + // Assert that there are no leftovers in packets. 
+ debug_assert!(packets.iter().all(|pkt| pkt.meta == Meta::default())); const SOCKADDR_STORAGE_SIZE: usize = mem::size_of::(); let mut hdrs: [mmsghdr; NUM_RCVMMSGS] = unsafe { mem::zeroed() }; @@ -95,26 +96,18 @@ pub fn recv_mmsg(sock: &UdpSocket, packets: &mut [Packet]) -> io::Result<(usize, }; let nrecv = unsafe { libc::recvmmsg(sock_fd, &mut hdrs[0], count as u32, MSG_WAITFORONE, &mut ts) }; - if nrecv < 0 { + let nrecv = if nrecv < 0 { return Err(io::Error::last_os_error()); + } else { + usize::try_from(nrecv).unwrap() + }; + for (addr, hdr, pkt) in izip!(addrs, hdrs, packets.iter_mut()).take(nrecv) { + pkt.meta.size = hdr.msg_len as usize; + if let Some(addr) = cast_socket_addr(&addr, &hdr) { + pkt.meta.set_addr(&addr.to_std()); + } } - let mut npkts = 0; - addrs - .iter() - .zip(hdrs) - .take(nrecv as usize) - .filter_map(|(addr, hdr)| { - let addr = cast_socket_addr(addr, &hdr)?.to_std(); - Some((addr, hdr)) - }) - .zip(packets.iter_mut()) - .for_each(|((addr, hdr), pkt)| { - pkt.meta.size = hdr.msg_len as usize; - pkt.meta.set_addr(&addr); - npkts += 1; - }); - let total_size = packets.iter().take(npkts).map(|pkt| pkt.meta.size).sum(); - Ok((total_size, npkts)) + Ok(nrecv) } #[cfg(test)] @@ -148,7 +141,7 @@ mod tests { } let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(sent, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); @@ -174,14 +167,17 @@ mod tests { } let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(TEST_NUM_MSGS, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); assert_eq!(packet.meta.addr(), saddr); } - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + packets + 
.iter_mut() + .for_each(|pkt| pkt.meta = Meta::default()); + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(sent - TEST_NUM_MSGS, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); @@ -213,7 +209,7 @@ mod tests { let start = Instant::now(); let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(TEST_NUM_MSGS, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); @@ -221,6 +217,9 @@ mod tests { } reader.set_nonblocking(true).unwrap(); + packets + .iter_mut() + .for_each(|pkt| pkt.meta = Meta::default()); let _recv = recv_mmsg(&reader, &mut packets[..]); assert!(start.elapsed().as_secs() < 5); } @@ -250,7 +249,7 @@ mod tests { let mut packets = vec![Packet::default(); TEST_NUM_MSGS]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(TEST_NUM_MSGS, recv); for packet in packets.iter().take(sent1) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); @@ -261,7 +260,10 @@ mod tests { assert_eq!(packet.meta.addr(), saddr2); } - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + packets + .iter_mut() + .for_each(|pkt| pkt.meta = Meta::default()); + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(sent1 + sent2 - TEST_NUM_MSGS, recv); for packet in packets.iter().take(recv) { assert_eq!(packet.meta.size, PACKET_DATA_SIZE); diff --git a/streamer/src/sendmmsg.rs b/streamer/src/sendmmsg.rs index 6e434f5ed5e27e..47abcc0af857d1 100644 --- a/streamer/src/sendmmsg.rs +++ b/streamer/src/sendmmsg.rs @@ -175,7 +175,7 @@ mod tests { assert_eq!(sent, Some(())); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); 
assert_eq!(32, recv); } @@ -206,11 +206,11 @@ mod tests { assert_eq!(sent, Some(())); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(16, recv); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader2, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader2, &mut packets[..]).unwrap(); assert_eq!(16, recv); } @@ -241,19 +241,19 @@ mod tests { assert_eq!(sent, Some(())); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); assert_eq!(1, recv); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader2, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader2, &mut packets[..]).unwrap(); assert_eq!(1, recv); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader3, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader3, &mut packets[..]).unwrap(); assert_eq!(1, recv); let mut packets = vec![Packet::default(); 32]; - let recv = recv_mmsg(&reader4, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader4, &mut packets[..]).unwrap(); assert_eq!(1, recv); } diff --git a/streamer/src/streamer.rs b/streamer/src/streamer.rs index d71a458c1cf7e8..9f7db9c546f6ad 100644 --- a/streamer/src/streamer.rs +++ b/streamer/src/streamer.rs @@ -3,7 +3,7 @@ use { crate::{ - packet::{self, send_to, Packets, PacketsRecycler, PACKETS_PER_BATCH}, + packet::{self, send_to, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH}, recvmmsg::NUM_RCVMMSGS, socket::SocketAddrSpace, }, @@ -21,8 +21,8 @@ use { thiserror::Error, }; -pub type PacketReceiver = Receiver; -pub type PacketSender = Sender; +pub type PacketBatchReceiver = Receiver; +pub type PacketBatchSender = Sender; #[derive(Error, Debug)] pub enum StreamerError { @@ -33,7 +33,7 @@ pub enum StreamerError { 
RecvTimeout(#[from] RecvTimeoutError), #[error("send packets error")] - Send(#[from] SendError), + Send(#[from] SendError), } pub type Result = std::result::Result; @@ -41,8 +41,8 @@ pub type Result = std::result::Result; fn recv_loop( sock: &UdpSocket, exit: Arc, - channel: &PacketSender, - recycler: &PacketsRecycler, + channel: &PacketBatchSender, + recycler: &PacketBatchRecycler, name: &'static str, coalesce_ms: u64, use_pinned_memory: bool, @@ -52,10 +52,10 @@ fn recv_loop( let mut now = Instant::now(); let mut num_max_received = 0; // Number of times maximum packets were received loop { - let mut msgs = if use_pinned_memory { - Packets::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, name) + let mut packet_batch = if use_pinned_memory { + PacketBatch::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, name) } else { - Packets::with_capacity(PACKETS_PER_BATCH) + PacketBatch::with_capacity(PACKETS_PER_BATCH) }; loop { // Check for exit signal, even if socket is busy @@ -63,14 +63,14 @@ fn recv_loop( if exit.load(Ordering::Relaxed) { return Ok(()); } - if let Ok(len) = packet::recv_from(&mut msgs, sock, coalesce_ms) { + if let Ok(len) = packet::recv_from(&mut packet_batch, sock, coalesce_ms) { if len == NUM_RCVMMSGS { num_max_received += 1; } recv_count += len; call_count += 1; if len > 0 { - channel.send(msgs)?; + channel.send(packet_batch)?; } break; } @@ -94,8 +94,8 @@ fn recv_loop( pub fn receiver( sock: Arc, exit: &Arc, - packet_sender: PacketSender, - recycler: PacketsRecycler, + packet_sender: PacketBatchSender, + recycler: PacketBatchRecycler, name: &'static str, coalesce_ms: u64, use_pinned_memory: bool, @@ -121,36 +121,42 @@ pub fn receiver( fn recv_send( sock: &UdpSocket, - r: &PacketReceiver, + r: &PacketBatchReceiver, socket_addr_space: &SocketAddrSpace, ) -> Result<()> { let timer = Duration::new(1, 0); - let msgs = r.recv_timeout(timer)?; - send_to(&msgs, sock, socket_addr_space)?; + let packet_batch = r.recv_timeout(timer)?; + 
send_to(&packet_batch, sock, socket_addr_space)?; Ok(()) } -pub fn recv_batch(recvr: &PacketReceiver) -> Result<(Vec, usize, Duration)> { +pub fn recv_packet_batches( + recvr: &PacketBatchReceiver, +) -> Result<(Vec, usize, Duration)> { let timer = Duration::new(1, 0); - let msgs = recvr.recv_timeout(timer)?; + let packet_batch = recvr.recv_timeout(timer)?; let recv_start = Instant::now(); - trace!("got msgs"); - let mut len = msgs.packets.len(); - let mut batch = vec![msgs]; - while let Ok(more) = recvr.try_recv() { - trace!("got more msgs"); - len += more.packets.len(); - batch.push(more); + trace!("got packets"); + let mut num_packets = packet_batch.packets.len(); + let mut packet_batches = vec![packet_batch]; + while let Ok(packet_batch) = recvr.try_recv() { + trace!("got more packets"); + num_packets += packet_batch.packets.len(); + packet_batches.push(packet_batch); } let recv_duration = recv_start.elapsed(); - trace!("batch len {}", batch.len()); - Ok((batch, len, recv_duration)) + trace!( + "packet batches len: {}, num packets: {}", + packet_batches.len(), + num_packets + ); + Ok((packet_batches, num_packets, recv_duration)) } pub fn responder( name: &'static str, sock: Arc, - r: PacketReceiver, + r: PacketBatchReceiver, socket_addr_space: SocketAddrSpace, ) -> JoinHandle<()> { Builder::new() @@ -187,7 +193,7 @@ mod test { use { super::*, crate::{ - packet::{Packet, Packets, PACKET_DATA_SIZE}, + packet::{Packet, PacketBatch, PACKET_DATA_SIZE}, streamer::{receiver, responder}, }, solana_perf::recycler::Recycler, @@ -204,16 +210,16 @@ mod test { }, }; - fn get_msgs(r: PacketReceiver, num: &mut usize) { + fn get_packet_batches(r: PacketBatchReceiver, num_packets: &mut usize) { for _ in 0..10 { - let m = r.recv_timeout(Duration::new(1, 0)); - if m.is_err() { + let packet_batch_res = r.recv_timeout(Duration::new(1, 0)); + if packet_batch_res.is_err() { continue; } - *num -= m.unwrap().packets.len(); + *num_packets -= packet_batch_res.unwrap().packets.len(); - if 
*num == 0 { + if *num_packets == 0 { break; } } @@ -222,7 +228,7 @@ mod test { #[test] fn streamer_debug() { write!(io::sink(), "{:?}", Packet::default()).unwrap(); - write!(io::sink(), "{:?}", Packets::default()).unwrap(); + write!(io::sink(), "{:?}", PacketBatch::default()).unwrap(); } #[test] fn streamer_send_test() { @@ -250,23 +256,23 @@ mod test { r_responder, SocketAddrSpace::Unspecified, ); - let mut msgs = Packets::default(); + let mut packet_batch = PacketBatch::default(); for i in 0..5 { - let mut b = Packet::default(); + let mut p = Packet::default(); { - b.data[0] = i as u8; - b.meta.size = PACKET_DATA_SIZE; - b.meta.set_addr(&addr); + p.data[0] = i as u8; + p.meta.size = PACKET_DATA_SIZE; + p.meta.set_addr(&addr); } - msgs.packets.push(b); + packet_batch.packets.push(p); } - s_responder.send(msgs).expect("send"); + s_responder.send(packet_batch).expect("send"); t_responder }; - let mut num = 5; - get_msgs(r_reader, &mut num); - assert_eq!(num, 0); + let mut packets_remaining = 5; + get_packet_batches(r_reader, &mut packets_remaining); + assert_eq!(packets_remaining, 0); exit.store(true, Ordering::Relaxed); t_receiver.join().expect("join"); t_responder.join().expect("join"); diff --git a/streamer/tests/recvmmsg.rs b/streamer/tests/recvmmsg.rs index 614dc16ee1b25c..7fa18739dfbb10 100644 --- a/streamer/tests/recvmmsg.rs +++ b/streamer/tests/recvmmsg.rs @@ -2,7 +2,7 @@ use { solana_streamer::{ - packet::{Packet, PACKET_DATA_SIZE}, + packet::{Meta, Packet, PACKET_DATA_SIZE}, recvmmsg::*, }, std::{net::UdpSocket, time::Instant}, @@ -25,7 +25,7 @@ pub fn test_recv_mmsg_batch_size() { } let mut packets = vec![Packet::default(); TEST_BATCH_SIZE]; let now = Instant::now(); - let recv = recv_mmsg(&reader, &mut packets[..]).unwrap().1; + let recv = recv_mmsg(&reader, &mut packets[..]).unwrap(); elapsed_in_max_batch += now.elapsed().as_nanos(); assert_eq!(TEST_BATCH_SIZE, recv); }); @@ -40,10 +40,13 @@ pub fn test_recv_mmsg_batch_size() { let mut recv = 0; let now 
= Instant::now(); while let Ok(num) = recv_mmsg(&reader, &mut packets[..]) { - recv += num.1; + recv += num; if recv >= TEST_BATCH_SIZE { break; } + packets + .iter_mut() + .for_each(|pkt| pkt.meta = Meta::default()); } elapsed_in_small_batch += now.elapsed().as_nanos(); assert_eq!(TEST_BATCH_SIZE, recv); diff --git a/sys-tuner/Cargo.toml b/sys-tuner/Cargo.toml index 94789324213140..f501013933a1cc 100644 --- a/sys-tuner/Cargo.toml +++ b/sys-tuner/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-sys-tuner" description = "The solana cluster system tuner daemon" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,8 +14,8 @@ publish = true clap = "2.33.1" log = "0.4.14" libc = "0.2.108" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [target."cfg(unix)".dependencies] unix_socket2 = "0.5.4" diff --git a/test-validator/Cargo.toml b/test-validator/Cargo.toml index 7dfc9a5a4a309f..111ecc0c9c0a7e 100644 --- a/test-validator/Cargo.toml +++ b/test-validator/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "solana-test-validator" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" homepage = "https://solana.com/" documentation = "https://docs.rs/solana-test-validator" readme = "../README.md" @@ -13,17 +13,20 @@ edition = "2021" [dependencies] base64 = "0.12.3" log = "0.4.14" -solana-client = { path = "../client", version = "=1.9.0" } -solana-core = { path = "../core", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", 
version = "=1.9.0" } -solana-program-test = { path = "../program-test", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } +serde_derive = "1.0.103" +serde_json = "1.0.72" +solana-cli-output = { path = "../cli-output", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-program-test = { path = "../program-test", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-validator/src/lib.rs b/test-validator/src/lib.rs index df3fc364e721de..bc99f78c9de4c7 100644 --- a/test-validator/src/lib.rs +++ b/test-validator/src/lib.rs @@ -1,6 +1,7 @@ #![allow(clippy::integer_arithmetic)] use { log::*, + solana_cli_output::CliAccount, solana_client::rpc_client::RpcClient, solana_core::{ tower_storage::TowerStorage, @@ -36,15 +37,23 @@ use { solana_streamer::socket::SocketAddrSpace, std::{ collections::HashMap, - fs::remove_dir_all, + fs::{remove_dir_all, File}, + io::Read, net::{IpAddr, Ipv4Addr, SocketAddr}, path::{Path, PathBuf}, + str::FromStr, sync::{Arc, RwLock}, thread::sleep, time::Duration, }, }; +#[derive(Clone)] +pub struct AccountInfo<'a> { + pub address: Pubkey, + pub filename: &'a str, +} + #[derive(Clone)] pub struct ProgramInfo { pub program_id: Pubkey, 
@@ -94,6 +103,8 @@ pub struct TestValidatorGenesis { pub authorized_voter_keypairs: Arc>>>, pub max_ledger_shreds: Option, pub max_genesis_archive_unpacked_size: Option, + pub accountsdb_plugin_config_files: Option>, + pub accounts_db_caching_enabled: bool, } impl TestValidatorGenesis { @@ -203,6 +214,41 @@ impl TestValidatorGenesis { self } + pub fn add_accounts_from_json_files(&mut self, accounts: &[AccountInfo]) -> &mut Self { + for account in accounts { + let account_path = + solana_program_test::find_file(account.filename).unwrap_or_else(|| { + error!("Unable to locate {}", account.filename); + solana_core::validator::abort(); + }); + let mut file = File::open(&account_path).unwrap(); + let mut account_info_raw = String::new(); + file.read_to_string(&mut account_info_raw).unwrap(); + + let result: serde_json::Result = serde_json::from_str(&account_info_raw); + let account_info = match result { + Err(err) => { + error!( + "Unable to deserialize {}: {}", + account_path.to_str().unwrap(), + err + ); + solana_core::validator::abort(); + } + Ok(deserialized) => deserialized, + }; + let address = Pubkey::from_str(account_info.keyed_account.pubkey.as_str()).unwrap(); + let account = account_info + .keyed_account + .account + .decode::() + .unwrap(); + + self.add_account(address, account); + } + self + } + /// Add an account to the test environment with the account data in the provided `filename` pub fn add_account_with_file_data( &mut self, @@ -510,6 +556,8 @@ impl TestValidator { } let mut validator_config = ValidatorConfig { + accountsdb_plugin_config_files: config.accountsdb_plugin_config_files.clone(), + accounts_db_caching_enabled: config.accounts_db_caching_enabled, rpc_addrs: Some(( SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), node.info.rpc.port()), SocketAddr::new( diff --git a/tokens/Cargo.toml b/tokens/Cargo.toml index 790aa3f0951702..8ea7b82cbfc6f0 100644 --- a/tokens/Cargo.toml +++ b/tokens/Cargo.toml @@ -3,7 +3,7 @@ name = "solana-tokens" 
description = "Blockchain, Rebuilt for Scale" authors = ["Solana Maintainers "] edition = "2021" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -19,14 +19,14 @@ indexmap = "1.7.0" indicatif = "0.16.2" pickledb = "0.4.1" serde = { version = "1.0", features = ["derive"] } -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-remote-wallet = { path = "../remote-wallet", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } spl-associated-token-account = { version = "=1.0.3" } spl-token = { version = "=3.2.0", features = ["no-entrypoint"] } tempfile = "3.2.0" @@ -34,6 +34,6 @@ thiserror = "1.0" [dev-dependencies] bincode = "1.3.3" -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-test-validator = { path = "../test-validator", version = "=1.9.0" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = 
"=1.9.4" } +solana-test-validator = { path = "../test-validator", version = "=1.9.4" } diff --git a/transaction-dos/Cargo.toml b/transaction-dos/Cargo.toml index 52b6ca926f6c82..2df4714119efc0 100644 --- a/transaction-dos/Cargo.toml +++ b/transaction-dos/Cargo.toml @@ -2,7 +2,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-transaction-dos" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -14,23 +14,23 @@ clap = "2.33.1" log = "0.4.14" rand = "0.7.0" rayon = "1.5.1" -solana-cli = { path = "../cli", version = "=1.9.0" } -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-core = { path = "../core", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-transaction-status = { path = "../transaction-status", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-cli = { path = "../cli", version = "=1.9.4" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version 
= "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-transaction-status = { path = "../transaction-status", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [dev-dependencies] -solana-local-cluster = { path = "../local-cluster", version = "=1.9.0" } +solana-local-cluster = { path = "../local-cluster", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/transaction-status/Cargo.toml b/transaction-status/Cargo.toml index 3512149f866fd0..35bb346c34846f 100644 --- a/transaction-status/Cargo.toml +++ b/transaction-status/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-transaction-status" -version = "1.9.0" +version = "1.9.4" description = "Solana transaction status types" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -19,12 +19,12 @@ log = "0.4.14" serde = "1.0.130" serde_derive = "1.0.103" serde_json = "1.0.72" -solana-account-decoder = { path = "../account-decoder", version = "=1.9.0" } -solana-measure = { path = "../measure", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-vote-program = { path = "../programs/vote", version = "=1.9.0" } +solana-account-decoder = { path = "../account-decoder", version = "=1.9.4" } +solana-measure = { path = "../measure", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } spl-associated-token-account = { version = "=1.0.3", features = ["no-entrypoint"] } spl-memo = { version = "=3.0.1", 
features = ["no-entrypoint"] } spl-token = { version = "=3.2.0", features = ["no-entrypoint"] } diff --git a/transaction-status/src/extract_memos.rs b/transaction-status/src/extract_memos.rs index cb6f7a94451b07..0fb0564805637c 100644 --- a/transaction-status/src/extract_memos.rs +++ b/transaction-status/src/extract_memos.rs @@ -76,7 +76,10 @@ mod test { solana_sdk::{ hash::Hash, instruction::CompiledInstruction, - message::{v0, MappedAddresses, MappedMessage, MessageHeader}, + message::{ + v0::{self, LoadedAddresses}, + MessageHeader, + }, }, }; @@ -125,7 +128,7 @@ mod test { let sanitized_message = SanitizedMessage::Legacy(message); assert_eq!(sanitized_message.extract_memos(), expected_memos); - let mapped_message = MappedMessage { + let sanitized_message = SanitizedMessage::V0(v0::LoadedMessage { message: v0::Message { header: MessageHeader { num_required_signatures: 1, @@ -136,12 +139,11 @@ mod test { instructions: memo_instructions, ..v0::Message::default() }, - mapped_addresses: MappedAddresses { + loaded_addresses: LoadedAddresses { writable: vec![], readonly: vec![spl_memo_id_v1(), another_program_id, spl_memo_id_v3()], }, - }; - let sanitized_mapped_message = SanitizedMessage::V0(mapped_message); - assert_eq!(sanitized_mapped_message.extract_memos(), expected_memos); + }); + assert_eq!(sanitized_message.extract_memos(), expected_memos); } } diff --git a/transaction-status/src/lib.rs b/transaction-status/src/lib.rs index 9cea1d9ee01ac1..77e5aea85a412e 100644 --- a/transaction-status/src/lib.rs +++ b/transaction-status/src/lib.rs @@ -355,7 +355,7 @@ pub struct Reward { pub type Rewards = Vec; -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ConfirmedBlock { pub previous_blockhash: String, @@ -485,7 +485,7 @@ impl From for EncodedConfirmedBlock { } } -#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +#[derive(Debug, 
Clone, Copy, Eq, Hash, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TransactionDetails { Full, @@ -579,7 +579,7 @@ impl TransactionWithStatusMeta { } } -#[derive(Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct EncodedTransactionWithStatusMeta { pub transaction: EncodedTransaction, @@ -595,7 +595,7 @@ impl TransactionStatusMeta { } } -#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)] +#[derive(Serialize, Deserialize, Clone, Copy, Debug, Eq, Hash, PartialEq)] #[serde(rename_all = "camelCase")] pub enum UiTransactionEncoding { Binary, // Legacy. Retained for RPC backwards compatibility diff --git a/transaction-status/src/parse_accounts.rs b/transaction-status/src/parse_accounts.rs index b6c315db442929..35d8d4c9e6ea7b 100644 --- a/transaction-status/src/parse_accounts.rs +++ b/transaction-status/src/parse_accounts.rs @@ -13,7 +13,7 @@ pub fn parse_accounts(message: &Message) -> Vec { for (i, account_key) in message.account_keys.iter().enumerate() { accounts.push(ParsedAccount { pubkey: account_key.to_string(), - writable: message.is_writable(i, /*demote_program_write_locks=*/ true), + writable: message.is_writable(i), signer: message.is_signer(i), }); } diff --git a/upload-perf/Cargo.toml b/upload-perf/Cargo.toml index a5c3d653602aaa..86b022bda07f9c 100644 --- a/upload-perf/Cargo.toml +++ b/upload-perf/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-upload-perf" -version = "1.9.0" +version = "1.9.4" description = "Metrics Upload Utility" authors = ["Solana Maintainers "] repository = "https://github.com/solana-labs/solana" @@ -11,7 +11,7 @@ publish = false [dependencies] serde_json = "1.0.72" -solana-metrics = { path = "../metrics", version = "=1.9.0" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } [[bin]] name = "solana-upload-perf" diff --git a/validator/Cargo.toml b/validator/Cargo.toml index 
e842ce5ad8224b..58894a31cd5d0b 100644 --- a/validator/Cargo.toml +++ b/validator/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-validator" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -25,30 +25,30 @@ jsonrpc-server-utils= "18.0.0" log = "0.4.14" num_cpus = "1.13.0" rand = "0.7.0" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-core = { path = "../core", version = "=1.9.0" } -solana-download-utils = { path = "../download-utils", version = "=1.9.0" } -solana-entry = { path = "../entry", version = "=1.9.0" } -solana-faucet = { path = "../faucet", version = "=1.9.0" } -solana-genesis-utils = { path = "../genesis-utils", version = "=1.9.0" } -solana-gossip = { path = "../gossip", version = "=1.9.0" } -solana-ledger = { path = "../ledger", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-net-utils = { path = "../net-utils", version = "=1.9.0" } -solana-perf = { path = "../perf", version = "=1.9.0" } -solana-poh = { path = "../poh", version = "=1.9.0" } -solana-replica-lib = { path = "../replica-lib", version = "=1.9.0" } -solana-rpc = { path = "../rpc", version = "=1.9.0" } -solana-runtime = { path = "../runtime", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.0" } -solana-streamer = { path = "../streamer", version = "=1.9.0" } -solana-test-validator = { path = "../test-validator", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } -solana-vote-program = { path = 
"../programs/vote", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-core = { path = "../core", version = "=1.9.4" } +solana-download-utils = { path = "../download-utils", version = "=1.9.4" } +solana-entry = { path = "../entry", version = "=1.9.4" } +solana-faucet = { path = "../faucet", version = "=1.9.4" } +solana-genesis-utils = { path = "../genesis-utils", version = "=1.9.4" } +solana-gossip = { path = "../gossip", version = "=1.9.4" } +solana-ledger = { path = "../ledger", version = "=1.9.4" } +solana-logger = { path = "../logger", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-net-utils = { path = "../net-utils", version = "=1.9.4" } +solana-perf = { path = "../perf", version = "=1.9.4" } +solana-poh = { path = "../poh", version = "=1.9.4" } +solana-replica-lib = { path = "../replica-lib", version = "=1.9.4" } +solana-rpc = { path = "../rpc", version = "=1.9.4" } +solana-runtime = { path = "../runtime", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-send-transaction-service = { path = "../send-transaction-service", version = "=1.9.4" } +solana-streamer = { path = "../streamer", version = "=1.9.4" } +solana-test-validator = { path = "../test-validator", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } +solana-vote-program = { path = "../programs/vote", version = "=1.9.4" } symlink = "0.1.0" [target.'cfg(not(target_env = "msvc"))'.dependencies] diff --git a/validator/src/bin/solana-test-validator.rs b/validator/src/bin/solana-test-validator.rs index 4310b79acefedc..34bc7ec7409ce0 100644 --- a/validator/src/bin/solana-test-validator.rs +++ b/validator/src/bin/solana-test-validator.rs @@ -1,5 +1,5 @@ use { - clap::{crate_name, value_t, value_t_or_exit, App, Arg}, + 
clap::{crate_name, value_t, value_t_or_exit, values_t_or_exit, App, Arg}, log::*, solana_clap_utils::{ input_parsers::{pubkey_of, pubkeys_of, value_of}, @@ -24,9 +24,10 @@ use { system_program, }, solana_streamer::socket::SocketAddrSpace, + solana_test_validator::*, solana_validator::{ admin_rpc_service, dashboard::Dashboard, ledger_lockfile, lock_ledger, println_name_value, - redirect_stderr_to_file, solana_test_validator::*, + redirect_stderr_to_file, }, std::{ collections::HashSet, @@ -166,6 +167,19 @@ fn main() { First argument can be a public key or path to file that can be parsed as a keypair", ), ) + .arg( + Arg::with_name("account") + .long("account") + .value_name("ADDRESS FILENAME.JSON") + .takes_value(true) + .number_of_values(2) + .multiple(true) + .help( + "Load an account from the provided JSON file (see `solana account --help` on how to dump \ + an account to file). Files are searched for relatively to CWD and tests/fixtures. \ + If the ledger already exists then this parameter is silently ignored", + ), + ) .arg( Arg::with_name("no_bpf_jit") .long("no-bpf-jit") @@ -282,6 +296,20 @@ fn main() { If the ledger already exists then this parameter is silently ignored", ), ) + .arg( + Arg::with_name("accountsdb_plugin_config") + .long("accountsdb-plugin-config") + .value_name("FILE") + .takes_value(true) + .multiple(true) + .hidden(true) + .help("Specify the configuration file for the AccountsDb plugin."), + ) + .arg( + Arg::with_name("no_accounts_db_caching") + .long("no-accounts-db-caching") + .help("Disables accounts caching"), + ) .get_matches(); let output = if matches.is_present("quiet") { @@ -394,7 +422,7 @@ fn main() { faucet_port, )); - let mut programs = vec![]; + let mut programs_to_load = vec![]; if let Some(values) = matches.values_of("bpf_program") { let values: Vec<&str> = values.collect::>(); for address_program in values.chunks(2) { @@ -417,7 +445,7 @@ fn main() { exit(1); } - programs.push(ProgramInfo { + programs_to_load.push(ProgramInfo 
{ program_id: address, loader: solana_sdk::bpf_loader::id(), program_path, @@ -428,7 +456,25 @@ fn main() { } } - let clone_accounts: HashSet<_> = pubkeys_of(&matches, "clone_account") + let mut accounts_to_load = vec![]; + if let Some(values) = matches.values_of("account") { + let values: Vec<&str> = values.collect::>(); + for address_filename in values.chunks(2) { + match address_filename { + [address, filename] => { + let address = address.parse::().unwrap_or_else(|err| { + println!("Error: invalid address {}: {}", address, err); + exit(1); + }); + + accounts_to_load.push(AccountInfo { address, filename }); + } + _ => unreachable!(), + } + } + } + + let accounts_to_clone: HashSet<_> = pubkeys_of(&matches, "clone_account") .map(|v| v.into_iter().collect()) .unwrap_or_default(); @@ -490,6 +536,7 @@ fn main() { for (name, long) in &[ ("bpf_program", "--bpf-program"), ("clone_account", "--clone"), + ("account", "--account"), ("mint_address", "--mint"), ("slots_per_epoch", "--slots-per-epoch"), ("faucet_sol", "--faucet-sol"), @@ -508,6 +555,7 @@ fn main() { let mut genesis = TestValidatorGenesis::default(); genesis.max_ledger_shreds = value_of(&matches, "limit_ledger_size"); genesis.max_genesis_archive_unpacked_size = Some(u64::MAX); + genesis.accounts_db_caching_enabled = !matches.is_present("no_accounts_db_caching"); let tower_storage = Arc::new(FileTowerStorage::new(ledger_path.clone())); @@ -555,11 +603,12 @@ fn main() { }) .bpf_jit(!matches.is_present("no_bpf_jit")) .rpc_port(rpc_port) - .add_programs_with_path(&programs); + .add_programs_with_path(&programs_to_load) + .add_accounts_from_json_files(&accounts_to_load); - if !clone_accounts.is_empty() { + if !accounts_to_clone.is_empty() { genesis.clone_accounts( - clone_accounts, + accounts_to_clone, cluster_rpc_client .as_ref() .expect("bug: --url argument missing?"), @@ -596,6 +645,15 @@ fn main() { genesis.bind_ip_addr(bind_address); } + if matches.is_present("accountsdb_plugin_config") { + 
genesis.accountsdb_plugin_config_files = Some( + values_t_or_exit!(matches, "accountsdb_plugin_config", String) + .into_iter() + .map(PathBuf::from) + .collect(), + ); + } + match genesis.start_with_mint_address(mint_address, socket_addr_space) { Ok(test_validator) => { *admin_service_cluster_info.write().unwrap() = Some(test_validator.cluster_info()); diff --git a/validator/src/lib.rs b/validator/src/lib.rs index 347437c2404b16..fe29b45d6dd15f 100644 --- a/validator/src/lib.rs +++ b/validator/src/lib.rs @@ -13,7 +13,10 @@ use { thread::JoinHandle, }, }; -pub use {solana_gossip::cluster_info::MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, solana_test_validator}; +pub use { + solana_gossip::cluster_info::MINIMUM_VALIDATOR_PORT_RANGE_WIDTH, + solana_test_validator as test_validator, +}; pub mod admin_rpc_service; pub mod bootstrap; diff --git a/validator/src/main.rs b/validator/src/main.rs index 67ac40cb212a0f..b7a5e7618116df 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -1203,6 +1203,12 @@ pub fn main() { .default_value("4") .help("PubSub worker threads"), ) + .arg( + Arg::with_name("rpc_pubsub_enable_block_subscription") + .long("rpc-pubsub-enable-block-subscription") + .takes_value(false) + .help("Enable the unstable RPC PubSub `blockSubscribe` subscription"), + ) .arg( Arg::with_name("rpc_pubsub_enable_vote_subscription") .long("rpc-pubsub-enable-vote-subscription") @@ -2217,6 +2223,7 @@ pub fn main() { ) }), pubsub_config: PubSubConfig { + enable_block_subscription: matches.is_present("rpc_pubsub_enable_block_subscription"), enable_vote_subscription: matches.is_present("rpc_pubsub_enable_vote_subscription"), max_active_subscriptions: value_t_or_exit!( matches, diff --git a/version/Cargo.toml b/version/Cargo.toml index 4d00f370f6158f..3f7a053b3d94ce 100644 --- a/version/Cargo.toml +++ b/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "solana-version" -version = "1.9.0" +version = "1.9.4" description = "Solana Version" authors = ["Solana Maintainers 
"] repository = "https://github.com/solana-labs/solana" @@ -13,9 +13,9 @@ edition = "2021" log = "0.4.14" serde = "1.0.130" serde_derive = "1.0.103" -solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.0" } -solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } +solana-frozen-abi = { path = "../frozen-abi", version = "=1.9.4" } +solana-frozen-abi-macro = { path = "../frozen-abi/macro", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } [lib] name = "solana_version" diff --git a/watchtower/Cargo.toml b/watchtower/Cargo.toml index 4a13abd383f0b6..0cb57e18b07928 100644 --- a/watchtower/Cargo.toml +++ b/watchtower/Cargo.toml @@ -3,7 +3,7 @@ authors = ["Solana Maintainers "] edition = "2021" name = "solana-watchtower" description = "Blockchain, Rebuilt for Scale" -version = "1.9.0" +version = "1.9.4" repository = "https://github.com/solana-labs/solana" license = "Apache-2.0" homepage = "https://solana.com/" @@ -13,15 +13,15 @@ documentation = "https://docs.rs/solana-watchtower" clap = "2.33.1" log = "0.4.14" humantime = "2.0.1" -solana-clap-utils = { path = "../clap-utils", version = "=1.9.0" } -solana-cli-config = { path = "../cli-config", version = "=1.9.0" } -solana-cli-output = { path = "../cli-output", version = "=1.9.0" } -solana-client = { path = "../client", version = "=1.9.0" } -solana-logger = { path = "../logger", version = "=1.9.0" } -solana-metrics = { path = "../metrics", version = "=1.9.0" } -solana-notifier = { path = "../notifier", version = "=1.9.0" } -solana-sdk = { path = "../sdk", version = "=1.9.0" } -solana-version = { path = "../version", version = "=1.9.0" } +solana-clap-utils = { path = "../clap-utils", version = "=1.9.4" } +solana-cli-config = { path = "../cli-config", version = "=1.9.4" } +solana-cli-output = { path = "../cli-output", version = "=1.9.4" } +solana-client = { path = "../client", version = "=1.9.4" } +solana-logger = 
{ path = "../logger", version = "=1.9.4" } +solana-metrics = { path = "../metrics", version = "=1.9.4" } +solana-notifier = { path = "../notifier", version = "=1.9.4" } +solana-sdk = { path = "../sdk", version = "=1.9.4" } +solana-version = { path = "../version", version = "=1.9.4" } [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"]