From 15d8c0e80e1c313c051e3e95dd7d1322710c60d5 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Thu, 3 Feb 2022 18:47:11 -0500 Subject: [PATCH 01/31] Test new fc logging macros --- .gitmodules | 6 ++++++ libraries/CMakeLists.txt | 10 ++++++++++ libraries/fmt | 1 + libraries/spdlog | 1 + plugins/net_plugin/CMakeLists.txt | 4 ++-- plugins/net_plugin/net_plugin.cpp | 11 +++++++++++ 6 files changed, 31 insertions(+), 2 deletions(-) create mode 160000 libraries/fmt create mode 160000 libraries/spdlog diff --git a/.gitmodules b/.gitmodules index f3d406ce8f4..66245315bc5 100644 --- a/.gitmodules +++ b/.gitmodules @@ -28,3 +28,9 @@ [submodule "libraries/appbase"] path = libraries/appbase url = https://github.com/eosio/appbase +[submodule "libraries/spdlog"] + path = libraries/spdlog + url = https://github.com/gabime/spdlog +[submodule "libraries/fmt"] + path = libraries/fmt + url = https://github.com/fmtlib/fmt diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index a761f68f0e3..16a7b5b39a3 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -74,3 +74,13 @@ option(AMQP-CPP_LINUX_TCP CACHE ON) add_subdirectory( amqp-cpp EXCLUDE_FROM_ALL ) target_include_directories(amqpcpp PRIVATE "${OPENSSL_INCLUDE_DIR}") remove_definitions( -w ) + +# Suppress warnings on 3rdParty Library +add_definitions( -w ) +add_subdirectory( spdlog ) +remove_definitions( -w ) + +# Suppress warnings on 3rdParty Library +add_definitions( -w ) +add_subdirectory( fmt ) +remove_definitions( -w ) \ No newline at end of file diff --git a/libraries/fmt b/libraries/fmt new file mode 160000 index 00000000000..35c0286cd8f --- /dev/null +++ b/libraries/fmt @@ -0,0 +1 @@ +Subproject commit 35c0286cd8f1365bffbc417021e8cd23112f6c8f diff --git a/libraries/spdlog b/libraries/spdlog new file mode 160000 index 00000000000..8fb112158a6 --- /dev/null +++ b/libraries/spdlog @@ -0,0 +1 @@ +Subproject commit 8fb112158a64afe1bd58c82c06bf6a142c9b3823 diff --git 
a/plugins/net_plugin/CMakeLists.txt b/plugins/net_plugin/CMakeLists.txt index 3b8c1cd7b71..38f56585ceb 100644 --- a/plugins/net_plugin/CMakeLists.txt +++ b/plugins/net_plugin/CMakeLists.txt @@ -3,5 +3,5 @@ add_library( net_plugin net_plugin.cpp ${HEADERS} ) -target_link_libraries( net_plugin chain_plugin producer_plugin appbase fc ) -target_include_directories( net_plugin PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include") +target_link_libraries( net_plugin chain_plugin producer_plugin appbase fc spdlog fmt) +target_include_directories( net_plugin PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/include ${CMAKE_CURRENT_SOURCE_DIR}/../chain_interface/include "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/spdlog/include" "${CMAKE_CURRENT_SOURCE_DIR}/../../libraries/appbase/include" ) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 4adb1df9527..b92056f2bf8 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -344,6 +344,9 @@ namespace eosio { fc::logger logger; std::string peer_log_format; + auto new_logger = spdlog::stdout_logger_st("test"); + + // peer_[x]log must be called from thread in connection strand #define peer_dlog( PEER, FORMAT, ... 
) \ FC_MULTILINE_MACRO_BEGIN \ @@ -3739,6 +3742,14 @@ namespace eosio { void net_plugin::plugin_initialize( const variables_map& options ) { fc_ilog( logger, "Initialize net plugin" ); + // call spdlog logging method directly + new_logger->info("Initialize net plugin - spdlog method"); + // call spdlog logging macro + new_logger->set_pattern("%v"); + SPDLOG_LOGGER_INFO(new_logger, "Initialize net plugin - spdlog Macro"); + // call updated fc logging macro + fc_new_ilog(new_logger, "Initialize net plugin - new fc Macro"); + try { peer_log_format = options.at( "peer-log-format" ).as(); From 476a59341001ec10c73544205230a28422721506 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Thu, 3 Feb 2022 19:06:15 -0500 Subject: [PATCH 02/31] Add a new fc logging macro --- libraries/fc | 2 +- libraries/spdlog | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/fc b/libraries/fc index cd76dceef5f..dc3a378080c 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit cd76dceef5f91d4b12404e6d39d332216a11c52c +Subproject commit dc3a378080c3ae31805592f62b4397cf46c85d18 diff --git a/libraries/spdlog b/libraries/spdlog index 8fb112158a6..62430ee3d4e 160000 --- a/libraries/spdlog +++ b/libraries/spdlog @@ -1 +1 @@ -Subproject commit 8fb112158a64afe1bd58c82c06bf6a142c9b3823 +Subproject commit 62430ee3d4e306f2527916ed852595b5e66e3d2d From 526ecebd72ffeb4b78914bce976d5d740a6ec90b Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Thu, 3 Feb 2022 23:04:55 -0500 Subject: [PATCH 03/31] Update fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index dc3a378080c..b3348eb3a5b 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit dc3a378080c3ae31805592f62b4397cf46c85d18 +Subproject commit b3348eb3a5ba2de9842b977e38c92d1179a4dc84 From d74c87fa8782ca883b6356bcb45cc2a56861fd15 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 8 Feb 2022 13:38:05 -0500 Subject: 
[PATCH 04/31] Test new fc macro fc_new_dlog --- .../eosio/producer_plugin/producer_plugin.hpp | 2 ++ plugins/producer_plugin/producer_plugin.cpp | 19 ++++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 03cc4f2b94f..871b8f8b368 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -5,6 +5,8 @@ #include +#include +#include namespace eosio { using boost::signals2::signal; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 765fa39b1e1..289ec6e80dd 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -68,6 +68,9 @@ using boost::signals2::scoped_connection; const std::string logger_name("producer_plugin"); fc::logger _log; +//auto new_log = spdlog::create("producer_plugin"); +auto new_log = spdlog::stdout_logger_st("producer_plugin"); + const std::string trx_successful_trace_logger_name("transaction_success_tracing"); fc::logger _trx_successful_trace_log; @@ -2150,8 +2153,22 @@ void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" ); _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) ); + fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}", - ("num", chain.head_block_num() + 1)( "time", deadline ) ); + ( "time", deadline )("num", chain.head_block_num() + 1) ); + + // spdlog default level is `info` + new_log->set_level(spdlog::level::debug); + // use the new formatter defined in fc/time.hpp to format custom type `fc::time_point` + 
fc_new_dlog( new_log, "fc_new_dlog: Scheduling Block Production on Normal Block {} for {}", + chain.head_block_num() + 1, deadline ); + + // use fmt::vformat to format custom type `fc::time_point` + fmt::dynamic_format_arg_store store; + store.push_back(deadline); + auto deadline_str = spdlog::fmt_lib::vformat("{}", store); + fc_new_dlog(new_log, "fc_new_dlog: Scheduling Block Production on Normal Block {} for {}", chain.head_block_num() + 1, deadline_str); + } else { EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); From 8f19243d29c19a916b90ec861701469c7e58531a Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 8 Feb 2022 13:42:25 -0500 Subject: [PATCH 05/31] Update fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index b3348eb3a5b..1312e4e825a 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit b3348eb3a5ba2de9842b977e38c92d1179a4dc84 +Subproject commit 1312e4e825a681e8caff4fd0e5359a80530f0a2e From 1ffe4a282daa5dc0111c99a1eda04b18f8683137 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 8 Feb 2022 15:17:32 -0500 Subject: [PATCH 06/31] Access arguments by name --- plugins/producer_plugin/producer_plugin.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 289ec6e80dd..e6d2ce4bc77 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -2169,6 +2169,10 @@ void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { auto deadline_str = spdlog::fmt_lib::vformat("{}", store); fc_new_dlog(new_log, "fc_new_dlog: Scheduling Block Production on Normal Block {} for {}", chain.head_block_num() + 1, deadline_str); + auto str = fmt::format("fc_new_dlog: Scheduling Block Production on 
Normal Block #{num} for {time}", + fmt::arg("time", deadline_str), fmt::arg("num", chain.head_block_num() + 1)); + fc_new_dlog( new_log, "{}", str); + } else { EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); From 6bfe7ef7ea665fddaf708457276d6f7a54cd2f8e Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 8 Feb 2022 16:40:40 -0500 Subject: [PATCH 07/31] Test new macro FC_FMT --- libraries/fc | 2 +- plugins/producer_plugin/producer_plugin.cpp | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 1312e4e825a..a12d9ec86fd 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 1312e4e825a681e8caff4fd0e5359a80530f0a2e +Subproject commit a12d9ec86fdc0ab77976cd6b205477233b6df8af diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index e6d2ce4bc77..c2727dad0fa 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -2169,10 +2169,15 @@ void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { auto deadline_str = spdlog::fmt_lib::vformat("{}", store); fc_new_dlog(new_log, "fc_new_dlog: Scheduling Block Production on Normal Block {} for {}", chain.head_block_num() + 1, deadline_str); + // auto str = fmt::format("fc_new_dlog: Scheduling Block Production on Normal Block #{num} for {time}", fmt::arg("time", deadline_str), fmt::arg("num", chain.head_block_num() + 1)); fc_new_dlog( new_log, "{}", str); + // + fc_new_dlog_2(new_log, "fc_new_dlog_2: Scheduling Block Production on Normal Block #{num} for {time}", + ("time", deadline_str)("num", chain.head_block_num() + 1)); + } else { EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); From 
0ea081fd78f98e9370d34b7600f1c2b02a11e0f3 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 14 Feb 2022 01:39:41 -0500 Subject: [PATCH 08/31] Update fc log macros to make them call spdlog macors --- libraries/amqp/CMakeLists.txt | 4 +- .../eosio/amqp/retrying_amqp_connection.hpp | 15 ++ libraries/chain/apply_context.cpp | 16 +- libraries/chain/authorization_manager.cpp | 50 +++--- libraries/chain/backing_store/db_context.cpp | 48 +++--- libraries/chain/block_log.cpp | 6 +- libraries/chain/controller.cpp | 31 ++-- .../backing_store/kv_context_chainbase.hpp | 12 +- .../backing_store/kv_context_rocksdb.hpp | 12 +- .../chain/include/eosio/chain/controller.hpp | 2 +- libraries/chain/protocol_feature_manager.cpp | 2 +- libraries/chain/resource_limits.cpp | 65 ++++---- libraries/chain/transaction_context.cpp | 4 +- libraries/fc | 2 +- plugins/net_plugin/net_plugin.cpp | 12 +- plugins/producer_plugin/producer_plugin.cpp | 142 ++++++++---------- .../state_history_plugin.cpp | 8 +- 17 files changed, 218 insertions(+), 213 deletions(-) diff --git a/libraries/amqp/CMakeLists.txt b/libraries/amqp/CMakeLists.txt index f1593796b6d..acfd5164594 100644 --- a/libraries/amqp/CMakeLists.txt +++ b/libraries/amqp/CMakeLists.txt @@ -4,5 +4,5 @@ add_library(amqp transactional_amqp_publisher.cpp util.cpp ) -target_include_directories(amqp PUBLIC include) -target_link_libraries(amqp fc amqpcpp) +target_include_directories(amqp PUBLIC include ../spdlog/include) +target_link_libraries(amqp fc amqpcpp spdlog) diff --git a/libraries/amqp/include/eosio/amqp/retrying_amqp_connection.hpp b/libraries/amqp/include/eosio/amqp/retrying_amqp_connection.hpp index 39a4672de7c..dec3dcddeb9 100644 --- a/libraries/amqp/include/eosio/amqp/retrying_amqp_connection.hpp +++ b/libraries/amqp/include/eosio/amqp/retrying_amqp_connection.hpp @@ -66,3 +66,18 @@ struct single_channel_retrying_amqp_connection { }; } + +namespace fmt { + template<> + struct formatter { + template + constexpr auto parse( 
ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const AMQP::Address& p, FormatContext& ctx ) { + return format_to( ctx.out(), "{}", (std::string)p ); + } + }; +} + + diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 741a5a5c550..38d216843fd 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -585,9 +585,9 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if (auto dm_logger = control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "DTRX_OP MODIFY_CANCEL ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", ("action_id", get_action_id()) - ("sender", receiver) + ("sender", receiver.to_string()) ("sender_id", sender_id) - ("payer", ptr->payer) + ("payer", ptr->payer.to_string()) ("published", ptr->published) ("delay", ptr->delay_until) ("expiration", ptr->expiration) @@ -616,9 +616,9 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a fc_dlog(*dm_logger, "DTRX_OP MODIFY_CREATE ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", ("action_id", get_action_id()) - ("sender", receiver) + ("sender", receiver.to_string()) ("sender_id", sender_id) - ("payer", payer) + ("payer", payer.to_string()) ("published", gtx.published) ("delay", gtx.delay_until) ("expiration", gtx.expiration) @@ -645,9 +645,9 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a fc_dlog(*dm_logger, "DTRX_OP CREATE ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", ("action_id", get_action_id()) - ("sender", receiver) + ("sender", receiver.to_string()) ("sender_id", sender_id) - ("payer", payer) + ("payer", payer.to_string()) ("published", gtx.published) ("delay", gtx.delay_until) ("expiration", gtx.expiration) @@ -679,9 +679,9 @@ bool 
apply_context::cancel_deferred_transaction( const uint128_t& sender_id, acc fc_dlog(*dm_logger, "DTRX_OP CANCEL ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", ("action_id", get_action_id()) - ("sender", receiver) + ("sender", receiver.to_string()) ("sender_id", sender_id) - ("payer", gto->payer) + ("payer", gto->payer.to_string()) ("published", gto->published) ("delay", gto->delay_until) ("expiration", gto->expiration) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 1427b3c3bff..b972537157d 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -159,11 +159,12 @@ namespace eosio { namespace chain { p.auth = auth; if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", - ("action_id", action_id) - ("permission_id", p.id) - ("data", p) - ); + //TODO: add formatter for custom type `permission_object` +// fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", +// ("action_id", action_id) +// ("permission_id", p.id) +// ("data", p) +// ); } }); return perm; @@ -199,11 +200,12 @@ namespace eosio { namespace chain { p.auth = std::move(auth); if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", - ("action_id", action_id) - ("permission_id", p.id) - ("data", p) - ); + //TODO: add formatter for custom type `permission_object` +// fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", +// ("action_id", action_id) +// ("permission_id", p.id) +// ("data", p) +// ); } }); return perm; @@ -226,14 +228,15 @@ namespace eosio { namespace chain { po.last_updated = _control.pending_block_time(); if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP UPD ${action_id} ${permission_id} ${data}", - ("action_id", 
action_id) - ("permission_id", po.id) - ("data", fc::mutable_variant_object() - ("old", old_permission) - ("new", po) - ) - ); + //TODO: add formatter for custom type `permission_object` +// fc_dlog(*dm_logger, "PERM_OP UPD ${action_id} ${permission_id} ${data}", +// ("action_id", action_id) +// ("permission_id", po.id) +// ("data", fc::mutable_variant_object() +// ("old", old_permission) +// ("new", po) +// ) +// ); } }); } @@ -247,11 +250,12 @@ namespace eosio { namespace chain { _db.get_mutable_index().remove_object( permission.usage_id._id ); if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP REM ${action_id} ${permission_id} ${data}", - ("action_id", action_id) - ("permission_id", permission.id) - ("data", permission) - ); + //TODO: add formatter for custom type `permission_object` +// fc_dlog(*dm_logger, "PERM_OP REM ${action_id} ${permission_id} ${data}", +// ("action_id", action_id) +// ("permission_id", permission.id) +// ("data", permission) +// ); } _db.remove( permission ); diff --git a/libraries/chain/backing_store/db_context.cpp b/libraries/chain/backing_store/db_context.cpp index f5a0c603525..8aa7ec64b8d 100644 --- a/libraries/chain/backing_store/db_context.cpp +++ b/libraries/chain/backing_store/db_context.cpp @@ -24,20 +24,20 @@ std::string db_context::table_event(name code, name scope, name table, name qual void db_context::log_insert_table(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name payer) { fc_dlog(deep_mind_logger, "TBL_OP INS ${action_id} ${code} ${scope} ${table} ${payer}", ("action_id", action_id) - ("code", code) - ("scope", scope) - ("table", table) - ("payer", payer) + ("code", code.to_string()) + ("scope", scope.to_string()) + ("table", table.to_string()) + ("payer", payer.to_string()) ); } void db_context::log_remove_table(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name payer) { 
fc_dlog(deep_mind_logger, "TBL_OP REM ${action_id} ${code} ${scope} ${table} ${payer}", ("action_id", action_id) - ("code", code) - ("scope", scope) - ("table", table) - ("payer", payer) + ("code", code.to_string()) + ("scope", scope.to_string()) + ("table", table.to_string()) + ("payer", payer.to_string()) ); } @@ -45,11 +45,11 @@ void db_context::log_row_insert(fc::logger& deep_mind_logger, uint32_t action_id account_name payer, account_name primkey, const char* buffer, size_t buffer_size) { fc_dlog(deep_mind_logger, "DB_OP INS ${action_id} ${payer} ${table_code} ${scope} ${table_name} ${primkey} ${ndata}", ("action_id", action_id) - ("payer", payer) - ("table_code", code) - ("scope", scope) - ("table_name", table) - ("primkey", primkey) + ("payer", payer.to_string()) + ("table_code", code.to_string()) + ("scope", scope.to_string()) + ("table_name", table.to_string()) + ("primkey", primkey.to_string()) ("ndata", fc::to_hex(buffer, buffer_size)) ); } @@ -59,12 +59,12 @@ void db_context::log_row_update(fc::logger& deep_mind_logger, uint32_t action_id const char* old_buffer, size_t old_buffer_size, const char* new_buffer, size_t new_buffer_size) { fc_dlog(deep_mind_logger, "DB_OP UPD ${action_id} ${opayer}:${npayer} ${table_code} ${scope} ${table_name} ${primkey} ${odata}:${ndata}", ("action_id", action_id) - ("opayer", old_payer) - ("npayer", new_payer) - ("table_code", code) - ("scope", scope) - ("table_name", table) - ("primkey", primkey) + ("opayer", old_payer.to_string()) + ("npayer", new_payer.to_string()) + ("table_code", code.to_string()) + ("scope", scope.to_string()) + ("table_name", table.to_string()) + ("primkey", primkey.to_string()) ("odata", to_hex(old_buffer, old_buffer_size)) ("ndata", to_hex(new_buffer, new_buffer_size)) ); @@ -74,11 +74,11 @@ void db_context::log_row_remove(fc::logger& deep_mind_logger, uint32_t action_id account_name payer, account_name primkey, const char* buffer, size_t buffer_size) { fc_dlog(deep_mind_logger, "DB_OP REM 
${action_id} ${payer} ${table_code} ${scope} ${table_name} ${primkey} ${odata}", ("action_id", action_id) - ("payer", payer) - ("table_code", code) - ("scope", scope) - ("table_name", table) - ("primkey", primkey) + ("payer", payer.to_string()) + ("table_code", code.to_string()) + ("scope", scope.to_string()) + ("table_name", table.to_string()) + ("primkey", primkey.to_string()) ("odata", fc::to_hex(buffer, buffer_size)) ); } diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 21c16bcaa30..7caa3bf2d71 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -890,7 +890,7 @@ namespace eosio { namespace chain { ilog("Data at tail end of block log which should contain the (incomplete) serialization of block ${num} " "has been written out to '${tail_path}'.", - ("num", block_num + 1)("tail_path", tail_path)); + ("num", block_num + 1)("tail_path", tail_path.string())); } @@ -933,7 +933,7 @@ namespace eosio { namespace chain { fc::path block_log::repair_log(const fc::path& data_dir, uint32_t truncate_at_block, const char* reversible_block_dir_name) { ilog("Recovering Block Log..."); EOS_ASSERT(fc::is_directory(data_dir) && fc::is_regular_file(data_dir / "blocks.log"), block_log_not_found, - "Block log not found in '${blocks_dir}'", ("blocks_dir", data_dir)); + "Block log not found in '${blocks_dir}'", ("blocks_dir", data_dir.string())); if (truncate_at_block == 0) truncate_at_block = UINT32_MAX; @@ -956,7 +956,7 @@ namespace eosio { namespace chain { if (strlen(reversible_block_dir_name) && fc::is_directory(blocks_dir/reversible_block_dir_name)) { fc::rename(blocks_dir/ reversible_block_dir_name, backup_dir/ reversible_block_dir_name); } - ilog("Moved existing blocks directory to backup location: '${new_blocks_dir}'", ("new_blocks_dir", backup_dir)); + ilog("Moved existing blocks directory to backup location: '${new_blocks_dir}'", ("new_blocks_dir", backup_dir.string())); const auto block_log_path = blocks_dir / 
"blocks.log"; const auto block_file_name = block_log_path.generic_string(); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2fc3cc41ed4..7f02471badd 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -666,10 +666,11 @@ struct controller_impl { const auto& idx = db.get_index(); for (auto& row : idx.indices()) { if (row.abi.size() != 0) { - fc_dlog(*dm_logger, "ABIDUMP ABI ${contract} ${abi}", - ("contract", row.name) - ("abi", row.abi) - ); + //TODO: add formatter for custom type `shared_blob` +// fc_dlog(*dm_logger, "ABIDUMP ABI ${contract} ${abi}", +// ("contract", row.name.to_string()) +// ("abi", row.abi) +// ); } } fc_dlog(*dm_logger, "ABIDUMP END"); @@ -1437,10 +1438,11 @@ struct controller_impl { { // Promote proposed schedule to pending schedule. if( !replay_head_time ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", - ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) - ("lib", pbhs.dpos_irreversible_blocknum) - ("schedule", producer_authority_schedule::from_shared(gpo.proposed_schedule) ) ); + //TODO: add formatter for custom type `shared_producer_authority_schedule` +// ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", +// ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) +// ("lib", pbhs.dpos_irreversible_blocknum) +// ("schedule", producer_authority_schedule::from_shared(gpo.proposed_schedule) ) ); } EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, @@ -1734,7 +1736,8 @@ struct controller_impl { bool transaction_failed = trace && trace->except; bool transaction_can_fail = receipt.status == transaction_receipt_header::hard_fail && std::holds_alternative(receipt.trx); if( transaction_failed && !transaction_can_fail) { - edump((*trace)); + 
//TODO: add formatter for custom type `transaction_trace` +// edump((*trace)); throw *trace->except; } @@ -2451,7 +2454,7 @@ void controller::preactivate_feature( uint32_t action_id, const digest_type& fea fc_dlog(*dm_logger, "FEATURE_OP PRE_ACTIVATE ${action_id} ${feature_digest} ${feature}", ("action_id", action_id) ("feature_digest", feature_digest) - ("feature", feature.to_variant()) + ("feature", feature.to_variant().as_string()) ); } @@ -3132,7 +3135,7 @@ void controller::add_to_ram_correction( account_name account, uint64_t ram_bytes ("action_id", action_id) ("correction_id", correction_object_id) ("event_id", event_id) - ("payer", account) + ("payer", account.to_string()) ("delta", ram_bytes) ); } @@ -3225,7 +3228,7 @@ std::optional controller::extract_chain_id_from_db( const path& s } void controller::replace_producer_keys( const public_key_type& key ) { - ilog("Replace producer keys with ${k}", ("k", key)); + ilog("Replace producer keys with ${k}", ("k", key.to_string())); mutable_db().modify( db().get(), [&]( auto& gp ) { gp.proposed_schedule_block_num = {}; gp.proposed_schedule.version = 0; @@ -3235,7 +3238,7 @@ void controller::replace_producer_keys( const public_key_type& key ) { my->head->pending_schedule = {}; my->head->pending_schedule.schedule.version = version; for (auto& prod: my->head->active_schedule.producers ) { - ilog("${n}", ("n", prod.producer_name)); + ilog("${n}", ("n", prod.producer_name.to_string())); std::visit([&](auto &auth) { auth.threshold = 1; auth.keys = {key_weight{key, 1}}; @@ -3283,7 +3286,7 @@ void controller_impl::on_activationram_correction > static_cast(current_ram_usage) ) { ram_delta = -current_ram_usage; elog( "account ${name} was to be reduced by ${adjust} bytes of RAM despite only using ${current} bytes of RAM", - ("name", itr->name)("adjust", itr->ram_correction)("current", current_ram_usage) ); + ("name", itr->name.to_string())("adjust", itr->ram_correction)("current", current_ram_usage) ); } std::string event_id; 
diff --git a/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp b/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp index 8397caa1052..b61302d8a4b 100644 --- a/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp +++ b/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp @@ -172,8 +172,8 @@ namespace eosio { namespace chain { if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "KV_OP REM ${action_id} ${db} ${payer} ${key} ${odata}", ("action_id", resource_manager._context->get_action_id()) - ("contract", name{ contract }) - ("payer", kv->payer) + ("contract", name{ contract }.to_string()) + ("payer", kv->payer.to_string()) ("key", fc::to_hex(kv->kv_key.data(), kv->kv_key.size())) ("odata", fc::to_hex(kv->kv_value.data(), kv->kv_value.size())) ); @@ -198,8 +198,8 @@ namespace eosio { namespace chain { if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "KV_OP UPD ${action_id} ${db} ${payer} ${key} ${odata}:${ndata}", ("action_id", resource_manager._context->get_action_id()) - ("contract", name{ contract }) - ("payer", payer) + ("contract", name{ contract }.to_string()) + ("payer", payer.to_string()) ("key", fc::to_hex(kv->kv_key.data(), kv->kv_key.size())) ("odata", fc::to_hex(kv->kv_value.data(), kv->kv_value.size())) ("ndata", fc::to_hex(value, value_size)) @@ -223,8 +223,8 @@ namespace eosio { namespace chain { if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "KV_OP INS ${action_id} ${db} ${payer} ${key} ${ndata}", ("action_id", resource_manager._context->get_action_id()) - ("contract", name{ contract }) - ("payer", payer) + ("contract", name{ contract }.to_string()) + ("payer", payer.to_string()) ("key", fc::to_hex(key, key_size)) ("ndata", fc::to_hex(value, value_size)) ); diff --git 
a/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp b/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp index 9b2c290ef9a..b1fac50346f 100644 --- a/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp +++ b/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp @@ -365,8 +365,8 @@ namespace eosio { namespace chain { if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "KV_OP REM ${action_id} ${db} ${payer} ${key} ${odata}", ("action_id", resource_manager._context->get_action_id()) - ("contract", contract_name) - ("payer", pp->payer) + ("contract", contract_name.to_string()) + ("payer", pp->payer.to_string()) ("key", fc::to_hex(key, key_size)) ("odata", fc::to_hex(pp->value, pp->value_size)) ); @@ -408,8 +408,8 @@ namespace eosio { namespace chain { if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "KV_OP UPD ${action_id} ${db} ${payer} ${key} ${odata}:${ndata}", ("action_id", resource_manager._context->get_action_id()) - ("contract", contract_name) - ("payer", payer) + ("contract", contract_name.to_string()) + ("payer", payer.to_string()) ("key", fc::to_hex(key, key_size)) ("odata", fc::to_hex(old_pp->value, old_pp->value_size)) ("ndata", fc::to_hex(value, value_size)) @@ -421,8 +421,8 @@ namespace eosio { namespace chain { if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { fc_dlog(*dm_logger, "KV_OP INS ${action_id} ${db} ${payer} ${key} ${ndata}", ("action_id", resource_manager._context->get_action_id()) - ("contract", contract_name) - ("payer", payer) + ("contract", contract_name.to_string()) + ("payer", payer.to_string()) ("key", fc::to_hex(key, key_size)) ("ndata", fc::to_hex(value, value_size)) ); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index d0665f9c716..0fae4e610d4 
100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -365,7 +365,7 @@ namespace eosio { namespace chain { abi_def abi; if( abi_serializer::to_abi( a.abi, abi )) return abi_serializer( abi, yield ); - } FC_CAPTURE_AND_LOG((n)) + } FC_CAPTURE_AND_LOG((n.to_string())) } return std::optional(); } diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 82da9deb542..7d1f66b6ffa 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -722,7 +722,7 @@ Allows privileged contracts to get and set subsets of blockchain parameters. if (auto dm_logger = _get_deep_mind_logger()) { fc_dlog(*dm_logger, "FEATURE_OP ACTIVATE ${feature_digest} ${feature}", ("feature_digest", feature_digest) - ("feature", itr->to_variant()) + ("feature", itr->to_variant().as_string()) ); } diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 5a2897bf228..5c9e91271c6 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -56,9 +56,10 @@ void resource_limits_manager::initialize_database() { // see default settings in the declaration if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP CONFIG INS ${data}", - ("data", config) - ); + //TODO: add formatter for custom type `resource_limits_config_object` +// fc_dlog(*dm_logger, "RLIMIT_OP CONFIG INS ${data}", +// ("data", config) +// ); } }); @@ -70,9 +71,10 @@ void resource_limits_manager::initialize_database() { state.virtual_net_limit = config.net_limit_parameters.max; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP STATE INS ${data}", - ("data", state) - ); + //TODO: add formatter for custom type `resource_limits_state_object` +// fc_dlog(*dm_logger, "RLIMIT_OP STATE INS ${data}", +// ("data", state) +// ); } }); } @@ -122,9 +124,10 @@ 
void resource_limits_manager::initialize_account(const account_name& account) { bl.owner = account; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS INS ${data}", - ("data", bl) - ); + //TODO: add formatter for custom type `resource_limits_object` +// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS INS ${data}", +// ("data", bl) +// ); } }); @@ -132,9 +135,10 @@ void resource_limits_manager::initialize_account(const account_name& account) { bu.owner = account; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE INS ${data}", - ("data", bu) - ); + //TODO: add formatter for custom type `resource_usage_object` +// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE INS ${data}", +// ("data", bu) +// ); } }); } @@ -151,9 +155,10 @@ void resource_limits_manager::set_block_parameters(const elastic_limit_parameter c.net_limit_parameters = net_limit_parameters; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP CONFIG UPD ${data}", - ("data", c) - ); + //TODO: add formatter for custom type `resource_limits_config_object` +// fc_dlog(*dm_logger, "RLIMIT_OP CONFIG UPD ${data}", +// ("data", c) +// ); } }); } @@ -186,9 +191,10 @@ void resource_limits_manager::add_transaction_usage(const flat_set bu.cpu_usage.add( cpu_usage, time_slot, config.account_cpu_usage_average_window ); if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE UPD ${data}", - ("data", bu) - ); + //TODO: add formatter for the custom type of bu +// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE UPD ${data}", +// ("data", bu) +// ); } }); @@ -263,7 +269,7 @@ void resource_limits_manager::add_pending_ram_usage( const account_name account, ("family", trace.family) ("operation", trace.operation) ("legacy_tag", trace.legacy_tag) - ("payer", account) + ("payer", account.to_string()) ("new_usage", u.ram_usage) ("delta", ram_delta) ); @@ -324,9 +330,10 @@ bool 
resource_limits_manager::set_account_limits( const account_name& account, i pending_limits.cpu_weight = cpu_weight; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS UPD ${data}", - ("data", pending_limits) - ); + //TODO: add formatter for custom type `resource_limits_object` +// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS UPD ${data}", +// ("data", pending_limits) +// ); } }); @@ -396,9 +403,10 @@ void resource_limits_manager::process_account_limit_updates() { } if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", - ("data", state) - ); + //TODO: add formatter for the custom type of state +// fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", +// ("data", state) +// ); } }); } @@ -418,9 +426,10 @@ void resource_limits_manager::process_block_usage(uint32_t block_num) { state.pending_net_usage = 0; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", - ("data", state) - ); + //TODO: add formatter for the custom type of state +// fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", +// ("data", state) +// ); } }); diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 426df1eed81..a3ca65244ab 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -685,9 +685,9 @@ namespace eosio { namespace chain { auto packed_signed_trx = fc::raw::pack(packed_trx.to_packed_transaction_v0()->get_signed_transaction()); fc_dlog(*dm_logger, "DTRX_OP PUSH_CREATE ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", ("action_id", get_action_id()) - ("sender", gto.sender) + ("sender", gto.sender.to_string()) ("sender_id", gto.sender_id) - ("payer", gto.payer) + ("payer", gto.payer.to_string()) ("published", gto.published) ("delay", gto.delay_until) ("expiration", gto.expiration) diff --git a/libraries/fc 
b/libraries/fc index a12d9ec86fd..dcf1b0c776f 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit a12d9ec86fdc0ab77976cd6b205477233b6df8af +Subproject commit dcf1b0c776fbaa5211ff3e9ece96bfa71c433c44 diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b92056f2bf8..097cb196b9c 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -344,9 +344,6 @@ namespace eosio { fc::logger logger; std::string peer_log_format; - auto new_logger = spdlog::stdout_logger_st("test"); - - // peer_[x]log must be called from thread in connection strand #define peer_dlog( PEER, FORMAT, ... ) \ FC_MULTILINE_MACRO_BEGIN \ @@ -3588,7 +3585,7 @@ namespace eosio { found_producer_key = producer_plug->is_producer_key(msg.key); if( allowed_it == allowed_peers.end() && private_it == private_keys.end() && !found_producer_key) { fc_elog( logger, "Peer ${peer} sent a handshake with an unauthorized key: ${key}.", - ("peer", msg.p2p_address)("key", msg.key) ); + ("peer", msg.p2p_address)("key", msg.key.to_string()) ); return false; } } @@ -3742,13 +3739,6 @@ namespace eosio { void net_plugin::plugin_initialize( const variables_map& options ) { fc_ilog( logger, "Initialize net plugin" ); - // call spdlog logging method directly - new_logger->info("Initialize net plugin - spdlog method"); - // call spdlog logging macro - new_logger->set_pattern("%v"); - SPDLOG_LOGGER_INFO(new_logger, "Initialize net plugin - spdlog Macro"); - // call updated fc logging macro - fc_new_ilog(new_logger, "Initialize net plugin - new fc Macro"); try { peer_log_format = options.at( "peer-log-format" ).as(); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c2727dad0fa..0774d6f9eb3 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -68,8 +68,7 @@ using boost::signals2::scoped_connection; const std::string 
logger_name("producer_plugin"); fc::logger _log; -//auto new_log = spdlog::create("producer_plugin"); -auto new_log = spdlog::stdout_logger_st("producer_plugin"); +fc::log_level level = _log.get_log_level(); const std::string trx_successful_trace_logger_name("transaction_success_tracing"); fc::logger _trx_successful_trace_log; @@ -343,12 +342,12 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", - ("p",block->producer)("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp) + ("p",block->producer.to_string())("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp.to_time_point()) ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) ("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head ilog("Block not applied to head ${id}... 
#${n} @ ${t} signed by ${p} [trxs: ${count}, dpos: ${dpos}, conf: ${confs}, latency: ${latency} ms]", - ("p",hbs->block->producer)("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp) + ("p",hbs->block->producer.to_string())("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp.to_time_point()) ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) ("confs", hbs->block->confirmed)("latency", (fc::time_point::now() - hbs->block->timestamp).count()/1000 ) ); } @@ -381,13 +380,13 @@ class producer_plugin_impl : public std::enable_shared_from_thisid())("a",trx->get_transaction().first_authorizer())("why",ex->what())); + ("txid", trx->id())("a",trx->get_transaction().first_authorizer().to_string())("why",ex->what())); next(ex); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", - ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()))); + ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", - ("trx", self->chain_plug->get_log_trx(trx->get_transaction()))); + ("trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); }; try { auto result = future.get(); @@ -430,50 +429,50 @@ class producer_plugin_impl : public std::enable_shared_from_thisid()) - ("a", trx->packed_trx()->get_transaction().first_authorizer()) + ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string()) ("why",except_ptr->what())); fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()))); + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) + ("trx", 
chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("entire_trace", get_trace(response))); + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) + ("entire_trace", get_trace(response).as_string())); } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid}, auth: ${a} : ${why} ", ("txid", trx->id()) - ("a", trx->packed_trx()->get_transaction().first_authorizer()) + ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string()) ("why",except_ptr->what())); fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", - ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()))); + ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", - ("entire_trace", get_trace(response))); + ("entire_trace", get_trace(response).as_string())); } } else { if (_pending_block_mode == pending_block_mode::producing) { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${txid}, auth: ${a}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) ("txid", trx->id()) - ("a", trx->packed_trx()->get_transaction().first_authorizer())); + ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string())); fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("trx", 
chain_plug->get_log_trx(trx->packed_trx()->get_transaction()))); + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) + ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer()) - ("entire_trace", get_trace(response))); + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) + ("entire_trace", get_trace(response).as_string())); } else { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}, auth: ${a}", ("txid", trx->id()) - ("a", trx->packed_trx()->get_transaction().first_authorizer())); + ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string())); fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", - ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()))); + ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", - ("entire_trace", get_trace(response))); + ("entire_trace", get_trace(response).as_string())); } } }; @@ -532,14 +531,14 @@ class producer_plugin_impl : public std::enable_shared_from_thisbilled_cpu_time_us, false, sub_bill ); - fc_dlog( _trx_failed_trace_log, "Subjective bill for ${a}: ${b} elapsed ${t}us", ("a",first_auth)("b",sub_bill)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective bill for ${a}: ${b} elapsed ${t}us", ("a",first_auth.to_string())("b",sub_bill)("t",trace->elapsed)); if( trace->except ) { if( exception_is_exhausted( *trace->except, deadline_is_subjective )) { _unapplied_transactions.add_incoming( trx, persist_until_expired, return_failure_trace, next ); if( _pending_block_mode == 
pending_block_mode::producing ) { fc_dlog(_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING, ec: ${c} ", ("block_num", chain.head_block_num() + 1) - ("prod", get_pending_block_producer()) + ("prod", get_pending_block_producer().to_string()) ("txid", trx->id())("c", trace->except->code())); } else { fc_dlog(_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING, ec: ${c}", @@ -775,7 +774,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ auto key_id_to_wif_pair = dejsonify>(key_id_to_wif_pair_string); my->_signature_providers[key_id_to_wif_pair.first] = app().get_plugin().signature_provider_for_private_key(key_id_to_wif_pair.second); auto blanked_privkey = std::string(key_id_to_wif_pair.second.to_string().size(), '*' ); - wlog("\"private-key\" is DEPRECATED, use \"signature-provider=${pub}=KEY:${priv}\"", ("pub",key_id_to_wif_pair.first)("priv", blanked_privkey)); + wlog("\"private-key\" is DEPRECATED, use \"signature-provider=${pub}=KEY:${priv}\"", ("pub",key_id_to_wif_pair.first.to_string())("priv", blanked_privkey)); } catch ( const std::exception& e ) { elog("Malformed private key pair"); } @@ -791,7 +790,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ } catch(secure_enclave_exception& e) { elog("Error with Secure Enclave signature provider: ${e}; ignoring ${val}", ("e", e.top_message())("val", key_spec_pair)); } catch (fc::exception& e) { - elog("Malformed signature provider: \"${val}\": ${e}, ignoring!", ("val", key_spec_pair)("e", e)); + elog("Malformed signature provider: \"${val}\": ${e}, ignoring!", ("val", key_spec_pair)("e", e.to_string())); } catch (...) 
{ elog("Malformed signature provider: \"${val}\", ignoring!", ("val", key_spec_pair)); } @@ -1016,6 +1015,8 @@ void producer_plugin::handle_sighup() { fc::logger::update(trx_trace_success_logger_name, _trx_trace_success_log); fc::logger::update(trx_trace_failure_logger_name, _trx_trace_failure_log); fc::logger::update(trx_logger_name, _trx_log); + + } void producer_plugin::pause() { @@ -1476,7 +1477,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if (num_relevant_signatures == 0) { - elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + //TODO: add formatter for custom type block_signing_authority_v0 +// elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; } else if ( _pause_production ) { elog("Not producing block because production is explicitly paused"); @@ -1492,15 +1494,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const block_timestamp_type block_timestamp{block_time}; if (current_watermark->first > hbs->block_num) { elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) than the current fork's head (${head_block_num})", - ("producer", scheduled_producer.producer_name) + ("producer", scheduled_producer.producer_name.to_string()) ("watermark", current_watermark->first) ("head_block_num", hbs->block_num)); _pending_block_mode = pending_block_mode::speculating; } else if (current_watermark->second >= block_timestamp) { elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending block 
time (${block_timestamp})", - ("producer", scheduled_producer.producer_name) - ("watermark", current_watermark->second) - ("block_timestamp", block_timestamp)); + ("producer", scheduled_producer.producer_name.to_string()) + ("watermark", current_watermark->second.to_time_point()) + ("block_timestamp", block_timestamp.to_time_point())); _pending_block_mode = pending_block_mode::speculating; } } @@ -1529,7 +1531,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", - ("n", hbs->block_num + 1)("time", now)("p", scheduled_producer.producer_name)); + ("n", hbs->block_num + 1)("time", now)("p", scheduled_producer.producer_name.to_string())); try { uint16_t blocks_to_confirm = 0; @@ -1592,8 +1594,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } std::swap( features_to_activate, protocol_features_to_activate ); _protocol_features_signaled = true; - ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", - ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); + //TODO: add formatter for custom type `vector` +// ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", +// ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } } @@ -1605,7 +1608,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const fc::time_point preprocess_deadline = calculate_block_deadline(block_time); if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - elog("Unexpected block signing authority, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + //TODO: add formatter for custom type block_signing_authority_v0 +// elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } @@ -1676,23 +1680,23 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", ("block_num", chain.head_block_num() + 1)("txid", packed_trx_ptr->id()) - ("prod", chain.is_building_block() ? chain.pending_block_producer() : name()) ); + ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) ); fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${trx}", ("block_num", chain.head_block_num() + 1) - ("prod", chain.is_building_block() ? chain.pending_block_producer() : name()) - ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()))); + ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) + ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${entire_trx}", ("block_num", chain.head_block_num() + 1) - ("prod", chain.is_building_block() ? chain.pending_block_producer() : name()) - ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()))); + ("prod", chain.is_building_block() ? 
chain.pending_block_producer().to_string() : name().to_string()) + ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", ("txid", packed_trx_ptr->id())); fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${trx}", - ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()))); + ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${entire_trx}", - ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()))); + ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } ++num_expired_persistent; } else { @@ -1702,9 +1706,9 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) ("txid", packed_trx_ptr->id())); fc_dlog(_trx_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${trx}", - ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()))); + ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${entire_trx}", - ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()))); + ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } ++num_expired_other; } @@ -1792,7 +1796,7 @@ class account_failures { reason += "other"; } fc_dlog( _log, "Dropped ${n} trxs, account: ${a}, reason: ${r} exceeded", - ("n", e.second.num_failures - max_failures_per_account)("a", e.first)("r", reason) ); + ("n", e.second.num_failures - max_failures_per_account)("a", e.first.to_string())("r", reason) ); } } } @@ -1818,7 +1822,7 @@ 
class account_failures { } else { ex_flags = set_field( ex_flags, ex_fields::ex_other_exception ); fc_dlog( _log, "Failed trx, account: ${a}, reason: ${r}", - ("a", n)("r", exception_code) ); + ("a", n.to_string())("r", exception_code) ); } } @@ -1890,7 +1894,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin const uint32_t sub_bill = 0; auto trace = chain.push_transaction( trx, trx_deadline, prev_billed_cpu_time_us, false, sub_bill ); - fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for ${a}: ${b} prev ${t}us", ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for ${a}: ${b} prev ${t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); if( trace->except ) { if( exception_is_exhausted( *trace->except, deadline_is_subjective ) ) { if( block_is_exhausted() ) { @@ -1900,7 +1904,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin } // don't erase, subjective failure so try again next time } else { - fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for failed ${a}: ${b} prev ${t}us", ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for failed ${a}: ${b} prev ${t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); auto failure_code = trace->except->code(); if( failure_code != tx_duplicate::code_value ) { // this failed our configured maximum transaction time, we don't want to replay it @@ -1922,7 +1926,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin continue; } } else { - fc_dlog( _trx_successful_trace_log, "Subjective unapplied bill for success ${a}: ${b} prev ${t}us", ("a",first_auth)("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_successful_trace_log, "Subjective unapplied bill for success ${a}: ${b} 
prev ${t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); // if db_read_mode SPECULATIVE then trx is in the pending block and not immediately reverted _subjective_billing.subjective_bill( trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed, chain.get_read_mode() == chain::db_read_mode::SPECULATIVE ); @@ -2155,29 +2159,7 @@ void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) ); fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}", - ( "time", deadline )("num", chain.head_block_num() + 1) ); - - // spdlog default level is `info` - new_log->set_level(spdlog::level::debug); - // use the new formatter defined in fc/time.hpp to format custom type `fc::time_point` - fc_new_dlog( new_log, "fc_new_dlog: Scheduling Block Production on Normal Block {} for {}", - chain.head_block_num() + 1, deadline ); - - // use fmt::vformat to format custom type `fc::time_point` - fmt::dynamic_format_arg_store store; - store.push_back(deadline); - auto deadline_str = spdlog::fmt_lib::vformat("{}", store); - fc_new_dlog(new_log, "fc_new_dlog: Scheduling Block Production on Normal Block {} for {}", chain.head_block_num() + 1, deadline_str); - - // - auto str = fmt::format("fc_new_dlog: Scheduling Block Production on Normal Block #{num} for {time}", - fmt::arg("time", deadline_str), fmt::arg("num", chain.head_block_num() + 1)); - fc_new_dlog( new_log, "{}", str); - - // - fc_new_dlog_2(new_log, "fc_new_dlog_2: Scheduling Block Production on Normal Block #{num} for {time}", - ("time", deadline_str)("num", chain.head_block_num() + 1)); - + ("num", chain.head_block_num() + 1)( "time", deadline ) ); } else { EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); @@ -2224,7 
+2206,7 @@ std::optional producer_plugin_impl::calculate_producer_wake_up_t void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time) { if (wake_up_time) { - fc_dlog(_log, "Scheduling Speculative/Production Change at ${time}", ("time", wake_up_time)); + fc_dlog(_log, "Scheduling Speculative/Production Change at ${time}", ("time", wake_up_time.value())); static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); _timer.expires_at(epoch + boost::posix_time::microseconds(wake_up_time->time_since_epoch().count())); _timer.async_wait( app().get_priority_queue().wrap( priority::high, @@ -2308,8 +2290,8 @@ void producer_plugin_impl::produce_block() { chain.commit_block(); block_state_ptr new_bs = chain.head_block_state(); ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", - ("p",new_bs->header.producer)("id",new_bs->id.str().substr(8,16)) - ("n",new_bs->block_num)("t",new_bs->header.timestamp) + ("p",new_bs->header.producer.to_string())("id",new_bs->id.str().substr(8,16)) + ("n",new_bs->block_num)("t",new_bs->header.timestamp.to_time_point()) ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); } @@ -2318,9 +2300,9 @@ void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, ("txid", trx_id)("why", reason)); fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", - ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); + ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", - ("entire_trx", packed_trx_ptr ? 
my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()) : fc::variant{trx_id})); + ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); } bool producer_plugin::execute_incoming_transaction(const chain::transaction_metadata_ptr& trx, diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 4106801c8a8..d3d5edfd9cf 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -136,7 +136,8 @@ struct state_history_plugin_impl : std::enable_shared_from_this std::enable_if_t> operator()(T&& req) { - fc_ilog(_log, "received get_blocks_request = ${req}", ("req",req) ); + //TODO: add formatter for custom type `get_blocks_request_v0` + //fc_ilog(_log, "received get_blocks_request = {req}", ("req",req) ); auto request_span = fc_create_trace("get_blocks_request"); to_send_block_num = req.start_block_num; for (auto& cp : req.have_positions) { @@ -163,7 +164,8 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock_num % 1000 == 0) ) { fc_ilog(_log, "pushing result {\"head\":{\"block_num\":${head}},\"last_irreversible\":{\"block_num\":${last_irr}},\"this_block\":{\"block_num\":${this_block}, \"id\": ${id}}} to send queue", ("head", result.head.block_num)("last_irr", result.last_irreversible.block_num) - ("this_block", result.this_block ? result.this_block->block_num : fc::variant()) + ("this_block", result.this_block ? result.this_block->block_num : fc::variant().as_uint64()) ("id", block_id ? 
block_id->_hash[3] : 0 )); } From cfb0398a49dea011ace699ab9596b4c659432910 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 14 Feb 2022 12:20:28 -0500 Subject: [PATCH 09/31] Update spdlog submodule and fix errors caused by updated fc log macros --- libraries/spdlog | 2 +- plugins/wallet_plugin/wallet.cpp | 4 ++-- programs/rodeos/streams/logger.hpp | 2 +- unittests/api_tests.cpp | 3 ++- unittests/auth_tests.cpp | 9 ++++++--- unittests/bootseq_tests.cpp | 2 +- unittests/currency_tests.cpp | 6 ++++-- unittests/delay_tests.cpp | 3 ++- unittests/misc_tests.cpp | 2 +- unittests/producer_schedule_tests.cpp | 3 ++- unittests/whitelist_blacklist_tests.cpp | 13 ++++++++----- 11 files changed, 30 insertions(+), 19 deletions(-) diff --git a/libraries/spdlog b/libraries/spdlog index 62430ee3d4e..2d0bc286e23 160000 --- a/libraries/spdlog +++ b/libraries/spdlog @@ -1 +1 @@ -Subproject commit 62430ee3d4e306f2527916ed852595b5e66e3d2d +Subproject commit 2d0bc286e237999a25bd97195e2cb30634f2e007 diff --git a/plugins/wallet_plugin/wallet.cpp b/plugins/wallet_plugin/wallet.cpp index a4859a584ec..c0dbd4b9721 100644 --- a/plugins/wallet_plugin/wallet.cpp +++ b/plugins/wallet_plugin/wallet.cpp @@ -88,8 +88,8 @@ class soft_wallet_impl dest_path = destination_filename + "-" + std::to_string( suffix ) + _wallet_filename_extension; } wlog( "backing up wallet ${src} to ${dest}", - ("src", src_path) - ("dest", dest_path) ); + ("src", src_path.string()) + ("dest", dest_path.string()) ); fc::path dest_parent = fc::absolute(dest_path).parent_path(); try diff --git a/programs/rodeos/streams/logger.hpp b/programs/rodeos/streams/logger.hpp index 8c383828081..7226c33e3f3 100644 --- a/programs/rodeos/streams/logger.hpp +++ b/programs/rodeos/streams/logger.hpp @@ -14,7 +14,7 @@ class logger : public stream_handler { void publish(const std::vector& data, const std::string& routing_key) override { ilog("logger stream ${r}: [${data_size}] >> ${data}", - ("r", routing_key)("data", data)("data_size", 
data.size())); + ("r", routing_key)("data", std::string(data.begin(), data.end()))("data_size", data.size())); } }; diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index dad9127f32d..3165feb770c 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1672,7 +1672,8 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(more_deferred_transaction_tests, TYPE_T, backing_s auto print_deferred = [&index]() { for( const auto& gto : index ) { - wlog("id = ${id}, trx_id = ${trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); + //TODO: add formatter for custom type `oid` defined in chainbase.hpp +// wlog("id = ${id}, trx_id = ${trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); } }; diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index 0d265775d72..f3d29ac7991 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -68,19 +68,22 @@ BOOST_FIXTURE_TEST_CASE( delegate_auth, TESTER ) { try { }); auto original_auth = static_cast(control->get_authorization_manager().get_permission({"alice"_n, config::active_name}).auth); - wdump((original_auth)); + //TODO: add formatter for custom type `authority` defined in authority.hpp +// wdump((original_auth)); set_authority( "alice"_n, config::active_name, delegated_auth ); auto new_auth = static_cast(control->get_authorization_manager().get_permission({"alice"_n, config::active_name}).auth); - wdump((new_auth)); + //TODO: add formatter for custom type `authority` defined in authority.hpp +// wdump((new_auth)); BOOST_CHECK_EQUAL((new_auth == delegated_auth), true); produce_block(); produce_block(); auto auth = static_cast(control->get_authorization_manager().get_permission({"alice"_n, config::active_name}).auth); - wdump((auth)); + //TODO: add formatter for custom type `authority` defined in authority.hpp +// wdump((auth)); BOOST_CHECK_EQUAL((new_auth == auth), true); /// execute nonce from alice signed by bob diff --git a/unittests/bootseq_tests.cpp b/unittests/bootseq_tests.cpp index 
41ad766ddc4..e6ef6899b6b 100644 --- a/unittests/bootseq_tests.cpp +++ b/unittests/bootseq_tests.cpp @@ -167,7 +167,7 @@ class bootseq_tester : public TESTER { } void set_code_abi(const account_name& account, const vector& wasm, const char* abi, const private_key_type* signer = nullptr) { - wdump((account)); + wdump((account.to_string())); set_code(account, wasm, signer); set_abi(account, abi, signer); if (account == config::system_account_name) { diff --git a/unittests/currency_tests.cpp b/unittests/currency_tests.cpp index f82d87d4c6a..3e6becc9ca8 100644 --- a/unittests/currency_tests.cpp +++ b/unittests/currency_tests.cpp @@ -83,14 +83,16 @@ class currency_tester : public TESTER { ("can_recall", 0) ("can_whitelist", 0) ); - wdump((result)); + //TODO: add formatter for custom type `transaction_trace` defined in trace.hpp +// wdump((result)); result = push_action("eosio.token"_n, "issue"_n, mutable_variant_object() ("to", eosio_token) ("quantity", "1000000.0000 CUR") ("memo", "gggggggggggg") ); - wdump((result)); + //TODO: add formatter for custom type `transaction_trace` defined in trace.hpp +// wdump((result)); produce_block(); } diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index f4b8b994362..e466874e69d 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -68,7 +68,8 @@ BOOST_FIXTURE_TEST_CASE( delay_error_create_account, validating_tester) { try { ilog( fc::json::to_pretty_string(trx) ); auto trace = push_transaction( trx ); - edump((*trace)); + //TODO: add formatter for custom type `transaction_trace` defined in trace.hpp +// edump((*trace)); produce_blocks(6); diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index ec5c502573c..c16eee75291 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1415,7 +1415,7 @@ BOOST_AUTO_TEST_CASE(public_key_from_hash) { std::memcpy(&data.data[1], hash.data(), hash.data_size() ); fc::ecc::public_key_shim shim(data); fc::crypto::public_key 
eos_unknown_pk(std::move(shim)); - ilog( "public key with no known private key: ${k}", ("k", eos_unknown_pk) ); + ilog( "public key with no known private key: ${k}", ("k", eos_unknown_pk.to_string()) ); } BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index 3d9499bf27f..66e5d5bd625 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -481,7 +481,8 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { wdump((alice_last_produced_block_num)); { - wdump((c.control->head_block_state()->producer_to_last_produced)); + //TODO: add formatter for type `flat_map` used in block_header_state.hpp +// wdump((c.control->head_block_state()->producer_to_last_produced)); const auto& last_produced = c.control->head_block_state()->producer_to_last_produced; auto alice_itr = last_produced.find( "alice"_n ); BOOST_REQUIRE( alice_itr != last_produced.end() ); diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index 574b102d609..e1ce784af15 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -36,7 +36,8 @@ class whitelist_blacklist_tester { cfg.contract_blacklist = contract_blacklist; cfg.action_blacklist = action_blacklist; }, !shutdown_called); - wdump((last_produced_block)); + //TODO: add formatter for type `map` +// wdump((last_produced_block)); chain->set_last_produced_block_map( last_produced_block ); if( !bootstrap ) return; @@ -59,7 +60,8 @@ class whitelist_blacklist_tester { void shutdown() { FC_ASSERT( chain, "chain is not up" ); last_produced_block = chain->get_last_produced_block_map(); - wdump((last_produced_block)); + //TODO: add formatter for type `map` +// wdump((last_produced_block)); chain.reset(); shutdown_called = true; } @@ -483,10 +485,10 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { if( t->receipt && t->receipt->status == 
transaction_receipt::executed ) { wlog( "${trx_type} ${id} executed (first action is ${code}::${action})", - ("trx_type", t->scheduled ? "scheduled trx" : "trx")("id", t->id)("code", act.account)("action", act.name) ); + ("trx_type", t->scheduled ? "scheduled trx" : "trx")("id", t->id)("code", act.account.to_string())("action", act.name.to_string()) ); } else { wlog( "${trx_type} ${id} failed (first action is ${code}::${action})", - ("trx_type", t->scheduled ? "scheduled trx" : "trx")("id", t->id)("code", act.account)("action", act.name) ); + ("trx_type", t->scheduled ? "scheduled trx" : "trx")("id", t->id)("code", act.account.to_string())("action", act.name.to_string()) ); } }; @@ -802,7 +804,8 @@ BOOST_AUTO_TEST_CASE( greylist_limit_tests ) { try { BOOST_REQUIRE( rm.get_virtual_block_net_limit() > (3*cfg.max_block_net_usage) ); BOOST_REQUIRE( rm.get_virtual_block_net_limit() < (4*cfg.max_block_net_usage) ); - wdump((rm.get_account_net_limit_ex(user_account))); + //TODO: add formatter for custom type `std::pair` defined in resource_limits.hpp +// wdump((rm.get_account_net_limit_ex(user_account))); BOOST_REQUIRE( rm.get_account_net_limit_ex(user_account).first.max > 3*reqauth_net_charge ); BOOST_REQUIRE( rm.get_account_net_limit_ex(user_account).first.max < 4*reqauth_net_charge ); From ce5ca3244ea61d0eafa449484b1d3dfc44d8b841 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 14 Feb 2022 12:27:44 -0500 Subject: [PATCH 10/31] Use the latest commit of spdlog --- libraries/spdlog | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/spdlog b/libraries/spdlog index 2d0bc286e23..f2461f14306 160000 --- a/libraries/spdlog +++ b/libraries/spdlog @@ -1 +1 @@ -Subproject commit 2d0bc286e237999a25bd97195e2cb30634f2e007 +Subproject commit f2461f143060628452d9f918348399cca40d3d5b From 7594d8a667d41e4a9035b774a0d28c665a6a4142 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 14 Feb 2022 13:51:58 -0500 Subject: [PATCH 11/31] Update logging.json with 
some spdlog appenders --- programs/nodeos/logging.json | 43 +++++++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 11 deletions(-) diff --git a/programs/nodeos/logging.json b/programs/nodeos/logging.json index 8b1c55d3c86..fff79c15e61 100644 --- a/programs/nodeos/logging.json +++ b/programs/nodeos/logging.json @@ -46,6 +46,16 @@ "host": "host_name" }, "enabled": true + },{ + "name": "stderr_color_st", + "type": "stderr_color_sink_st", + "args": {}, + "enabled": true + },{ + "name": "stdout_color_st", + "type": "stdout_color_sink_st", + "args": {}, + "enabled": true } ], "loggers": [{ @@ -55,7 +65,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "net_plugin_impl", @@ -64,7 +75,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "http_plugin", @@ -73,7 +85,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "producer_plugin", @@ -82,7 +95,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "transaction_success_tracing", @@ -91,7 +105,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "transaction_failure_tracing", @@ -100,7 +115,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "transaction_trace_success", @@ -109,7 +125,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "transaction_trace_failure", @@ -118,7 +135,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "transaction", @@ -127,7 +145,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "state_history", @@ -136,7 +155,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] },{ "name": "trace_api", @@ -145,7 
+165,8 @@ "additivity": false, "appenders": [ "stderr", - "net" + "net", + "stderr_color_st" ] } ] From d144b8ee48d368855166710dd155a17859a8065d Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Fri, 25 Feb 2022 14:18:56 -0500 Subject: [PATCH 12/31] Test the new format method created for the reflected types --- .../chain/include/eosio/chain/transaction.hpp | 18 ++++ libraries/fc | 2 +- plugins/producer_plugin/producer_plugin.cpp | 85 ++++++++++--------- 3 files changed, 62 insertions(+), 43 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 662d5264d61..e65bb7d63ec 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -351,6 +351,24 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain +#include +#include +namespace fmt { + template<> + struct formatter { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const eosio::chain::transaction& p, FormatContext& ctx ) { + fc::string fmt, arg; + fc::formatter::format_arg(p, fmt, arg); + return format_to( ctx.out(), "{}", arg ); + //return format_to( ctx.out(), fmt, args ); //TODO + } + }; +} + FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) ) FC_REFLECT( eosio::chain::transaction_header, (expiration)(ref_block_num)(ref_block_prefix) (max_net_usage_words)(max_cpu_usage_ms)(delay_sec) ) diff --git a/libraries/fc b/libraries/fc index dcf1b0c776f..33ec8d72299 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit dcf1b0c776fbaa5211ff3e9ece96bfa71c433c44 +Subproject commit 33ec8d7229914b9e414b0705558995e477044462 diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0774d6f9eb3..02828c283a7 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -383,10 +383,10 @@ class producer_plugin_impl : public std::enable_shared_from_thisid())("a",trx->get_transaction().first_authorizer().to_string())("why",ex->what())); next(ex); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", - ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", - ("trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", +// ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", +// ("trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); }; try { auto result = future.get(); @@ -436,20 +436,21 @@ class producer_plugin_impl : public std::enable_shared_from_thisget_log_trx(trx->packed_trx()->get_transaction()).as_string())); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) - ("entire_trace", get_trace(response).as_string())); + //("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); + ("trx", trx->packed_trx()->get_transaction())); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", +// ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) +// ("entire_trace", get_trace(response).as_string())); } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid}, auth: ${a} : ${why} ", ("txid", trx->id()) ("a", 
trx->packed_trx()->get_transaction().first_authorizer().to_string()) ("why",except_ptr->what())); - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", - ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", - ("entire_trace", get_trace(response).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", +// ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", +// ("entire_trace", get_trace(response).as_string())); } } else { if (_pending_block_mode == pending_block_mode::producing) { @@ -458,21 +459,21 @@ class producer_plugin_impl : public std::enable_shared_from_thisid()) ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string())); - fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) - ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", - ("entire_trace", get_trace(response).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", +// ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) +// ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); +// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", +// ("block_num",
chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) +// ("entire_trace", get_trace(response).as_string())); } else { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}, auth: ${a}", ("txid", trx->id()) ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string())); - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", - ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", - ("entire_trace", get_trace(response).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", +// ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); +// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", +// ("entire_trace", get_trace(response).as_string())); } } }; @@ -1682,21 +1683,21 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) ("block_num", chain.head_block_num() + 1)("txid", packed_trx_ptr->id()) ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) ); - fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${trx}", - ("block_num", chain.head_block_num() + 1) - ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) - ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${entire_trx}", - ("block_num", chain.head_block_num() + 1) - ("prod", chain.is_building_block() ? 
chain.pending_block_producer().to_string() : name().to_string()) - ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${trx}", +// ("block_num", chain.head_block_num() + 1) +// ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) +// ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${entire_trx}", +// ("block_num", chain.head_block_num() + 1) +// ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) +// ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", ("txid", packed_trx_ptr->id())); - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${trx}", - ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${entire_trx}", - ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${trx}", +// ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${entire_trx}", +// ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } ++num_expired_persistent; } else { @@ -1705,10 +1706,10 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) "[TRX_TRACE] Node with producers configured is dropping an EXPIRED 
transaction that was PREVIOUSLY ACCEPTED : ${txid}", ("txid", packed_trx_ptr->id())); - fc_dlog(_trx_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${trx}", - ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${entire_trx}", - ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); +// fc_dlog(_trx_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${trx}", +// ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${entire_trx}", +// ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } ++num_expired_other; } @@ -2299,10 +2300,10 @@ void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", ("txid", trx_id)("why", reason)); - fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", - ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", - ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", +// ("entire_trx", packed_trx_ptr ? 
my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", +// ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); } bool producer_plugin::execute_incoming_transaction(const chain::transaction_metadata_ptr& trx, From 4f1cb326e4a7a7d3b21eb783d9832a86dd387619 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Sat, 26 Feb 2022 00:29:23 -0500 Subject: [PATCH 13/31] Revert the last commit and add formatter struct in FC_REFLECT macro for reflected types --- .../chain/include/eosio/chain/transaction.hpp | 18 ------------------ libraries/fc | 2 +- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index e65bb7d63ec..662d5264d61 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -351,24 +351,6 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain -#include -#include -namespace fmt { - template<> - struct formatter { - template - constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } - - template - auto format( const eosio::chain::transaction& p, FormatContext& ctx ) { - fc::string fmt, arg; - fc::formatter::format_arg(p, fmt, arg); - return format_to( ctx.out(), "{}", arg ); - //return format_to( ctx.out(), fmt, args ); //TODO - } - }; -} - FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) ) FC_REFLECT( eosio::chain::transaction_header, (expiration)(ref_block_num)(ref_block_prefix) (max_net_usage_words)(max_cpu_usage_ms)(delay_sec) ) diff --git a/libraries/fc b/libraries/fc index 33ec8d72299..5a1d5d412f4 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 
33ec8d7229914b9e414b0705558995e477044462 +Subproject commit 5a1d5d412f465da7768f50bb6407b9529d5201c8 From 45716a63def16a9f8b64dfa4bb4deeb6e4425cc9 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 28 Feb 2022 09:47:34 -0500 Subject: [PATCH 14/31] Add friend declarations and replace BOOST_PP_SEQ_NIL with BOOST_PP_EMPTY() --- libraries/chain/include/eosio/chain/asset.hpp | 2 ++ libraries/chain/include/eosio/chain/block_state.hpp | 2 ++ libraries/chain/include/eosio/chain/name.hpp | 2 ++ libraries/chain/include/eosio/chain/symbol.hpp | 2 ++ libraries/chain/include/eosio/chain/transaction.hpp | 4 ++++ libraries/fc | 2 +- unittests/api_tests.cpp | 4 ++-- 7 files changed, 15 insertions(+), 3 deletions(-) diff --git a/libraries/chain/include/eosio/chain/asset.hpp b/libraries/chain/include/eosio/chain/asset.hpp index dd427e3976d..bfab72df48f 100644 --- a/libraries/chain/include/eosio/chain/asset.hpp +++ b/libraries/chain/include/eosio/chain/asset.hpp @@ -85,6 +85,8 @@ struct asset : fc::reflect_init EOS_ASSERT( sym.valid(), asset_type_exception, "invalid symbol" ); } + friend struct fmt::formatter; + private: share_type amount; symbol sym; diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index a507c373585..3de8cef3117 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -32,6 +32,8 @@ namespace eosio { namespace chain { signed_block_ptr block; + friend struct fmt::formatter; + private: // internal use only, not thread safe friend struct fc::reflector; friend bool block_state_is_valid( const block_state& ); // work-around for multi-index access diff --git a/libraries/chain/include/eosio/chain/name.hpp b/libraries/chain/include/eosio/chain/name.hpp index 7c20d9685b1..e34ebdf0ccc 100644 --- a/libraries/chain/include/eosio/chain/name.hpp +++ b/libraries/chain/include/eosio/chain/name.hpp @@ -89,6 +89,8 @@ namespace eosio::chain { 
friend constexpr bool operator != ( const name& a, uint64_t b ) { return a.value != b; } constexpr explicit operator bool()const { return value != 0; } + + friend struct fmt::formatter; }; // Each char of the string is encoded into 5-bit chunk and left-shifted diff --git a/libraries/chain/include/eosio/chain/symbol.hpp b/libraries/chain/include/eosio/chain/symbol.hpp index 071505d83d3..0ef46c59b8b 100644 --- a/libraries/chain/include/eosio/chain/symbol.hpp +++ b/libraries/chain/include/eosio/chain/symbol.hpp @@ -138,6 +138,8 @@ namespace eosio { EOS_ASSERT( valid_name(name()), symbol_type_exception, "invalid symbol: ${name}", ("name",name())); } + friend struct fmt::formatter; + private: uint64_t m_value; friend struct fc::reflector; diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 662d5264d61..996ae168574 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace eosio { namespace chain { @@ -173,6 +174,7 @@ namespace eosio { namespace chain { const bytes& get_packed_context_free_data()const { return packed_context_free_data; } const bytes& get_packed_transaction()const { return packed_trx; } + friend struct fmt::formatter; private: void local_unpack_transaction(vector&& context_free_data); void local_unpack_context_free_data(); @@ -327,6 +329,8 @@ namespace eosio { namespace chain { std::size_t maximum_pruned_pack_size( cf_compression_type segment_compression ) const; + friend struct fmt::formatter; + private: friend struct fc::reflector; friend struct fc::reflector_init_visitor; diff --git a/libraries/fc b/libraries/fc index 5a1d5d412f4..4672869e756 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 5a1d5d412f465da7768f50bb6407b9529d5201c8 +Subproject commit 4672869e75605cec04dd3e41263cc5bceba11809 diff --git a/unittests/api_tests.cpp 
b/unittests/api_tests.cpp index 3165feb770c..b60d7901b4a 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -115,7 +115,7 @@ struct test_api_action { } }; -FC_REFLECT_TEMPLATE((uint64_t T), test_api_action, BOOST_PP_SEQ_NIL) +FC_REFLECT_TEMPLATE((uint64_t T), test_api_action, BOOST_PP_EMPTY()) template struct test_chain_action { @@ -128,7 +128,7 @@ struct test_chain_action { } }; -FC_REFLECT_TEMPLATE((uint64_t T), test_chain_action, BOOST_PP_SEQ_NIL) +FC_REFLECT_TEMPLATE((uint64_t T), test_chain_action, BOOST_PP_EMPTY()) struct check_auth { account_name account; From d8ce70e95e7cb6bee471e62c343830f91736889f Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Fri, 4 Mar 2022 15:45:12 -0500 Subject: [PATCH 15/31] Update fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 4672869e756..9241225034d 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 4672869e75605cec04dd3e41263cc5bceba11809 +Subproject commit 9241225034d37b578123fc080aca739670cd2fbe From 1632224198bcdaa3bbb767a11eb53b417ea2b446 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 7 Mar 2022 00:42:15 -0500 Subject: [PATCH 16/31] Update fc --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 9241225034d..940cb04f160 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 9241225034d37b578123fc080aca739670cd2fbe +Subproject commit 940cb04f160f1c4ac4a7394991ddd86c380866af From 1179bd16840cb094f5f2918d180650c6b236e361 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 8 Mar 2022 10:26:07 -0500 Subject: [PATCH 17/31] Add customized formatters for transaction type --- .../chain/include/eosio/chain/transaction.hpp | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 
996ae168574..6c3b46fcee0 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -355,6 +355,33 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain +namespace fmt { + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::vector& p, FormatContext& ctx ) { + auto f = fmt::formatter(); + for( const auto& i : p ) { f.format( i, ctx ); } + return format_to( ctx.out(), ""); + } + }; + + template + struct formatter>{ + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::pair& p, FormatContext& ctx ) { + fmt::formatter().format(p.second, ctx); + return format_to( ctx.out(), ""); + } + }; +} + FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) ) FC_REFLECT( eosio::chain::transaction_header, (expiration)(ref_block_num)(ref_block_prefix) (max_net_usage_words)(max_cpu_usage_ms)(delay_sec) ) From f0240b1fca00c2c509a258f05e3f81c930f38505 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 8 Mar 2022 16:07:01 -0500 Subject: [PATCH 18/31] Add formatters for permission_object --- libraries/chain/authorization_manager.cpp | 11 +++-- .../chain/include/eosio/chain/authority.hpp | 43 +++++++++++++++++++ .../include/eosio/chain/permission_object.hpp | 13 ++++++ libraries/fc | 2 +- 4 files changed, 62 insertions(+), 7 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index b972537157d..64b7b652329 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -159,12 +159,11 @@ namespace eosio { namespace chain { p.auth = auth; if (auto dm_logger = _control.get_deep_mind_logger()) { - //TODO: add formatter for custom type `permission_object` -// fc_dlog(*dm_logger, "PERM_OP INS ${action_id} 
${permission_id} ${data}", -// ("action_id", action_id) -// ("permission_id", p.id) -// ("data", p) -// ); + fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", + ("action_id", action_id) + ("permission_id", p.id) + ("data", p) + ); } }); return perm; diff --git a/libraries/chain/include/eosio/chain/authority.hpp b/libraries/chain/include/eosio/chain/authority.hpp index 5b5da057442..85a7969dfba 100644 --- a/libraries/chain/include/eosio/chain/authority.hpp +++ b/libraries/chain/include/eosio/chain/authority.hpp @@ -323,6 +323,49 @@ namespace fc { void to_variant(const eosio::chain::shared_public_key& var, fc::variant& vo); } // namespace fc +namespace fmt { + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const eosio::chain::shared_vector& p, FormatContext& ctx ) { + auto f = fmt::formatter(); + for( const auto& i : p ) { f.format( i, ctx ); } + return format_to( ctx.out(), ""); + } + }; + template<> + struct formatter { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const eosio::chain::shared_public_key_data& p, FormatContext& ctx ) { + std::visit([&](auto&& arg) { + using T = std::decay_t; + if constexpr (std::is_same_v || std::is_same_v) + fmt::formatter().format(arg, ctx); + else if constexpr (std::is_same_v) + format_to( ctx.out(), "{}", arg.data() ); + }, p); + return format_to( ctx.out(), ""); + } + }; + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const fc::array& p, FormatContext& ctx ) { + + return format_to( ctx.out(), "{}", std::string_view(p.begin(), N)); + } + }; +} + FC_REFLECT(eosio::chain::permission_level_weight, (permission)(weight) ) FC_REFLECT(eosio::chain::key_weight, (key)(weight) ) FC_REFLECT(eosio::chain::wait_weight, (wait_sec)(weight) ) diff --git 
a/libraries/chain/include/eosio/chain/permission_object.hpp b/libraries/chain/include/eosio/chain/permission_object.hpp index d323a1ef899..afc55bb31e2 100644 --- a/libraries/chain/include/eosio/chain/permission_object.hpp +++ b/libraries/chain/include/eosio/chain/permission_object.hpp @@ -117,6 +117,19 @@ namespace eosio { namespace chain { } } } // eosio::chain +namespace fmt { + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const chainbase::oid& p, FormatContext& ctx ) { + return format_to( ctx.out(), "{}", std::string_view(boost::core::demangle(typeid(chainbase::oid).name())) ); + } + }; +} + CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_object, eosio::chain::permission_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::permission_usage_object, eosio::chain::permission_usage_index) diff --git a/libraries/fc b/libraries/fc index 940cb04f160..c8ce1833e09 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 940cb04f160f1c4ac4a7394991ddd86c380866af +Subproject commit c8ce1833e09af67a6eeba9ed4a6a6b4937eae902 From a53e81f70ce996d755eadefb49d4b1f1f2d0e5e5 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Wed, 9 Mar 2022 00:46:12 -0500 Subject: [PATCH 19/31] Add formatters for block_signing_authority --- libraries/chain/authorization_manager.cpp | 42 ++++++++----------- libraries/chain/controller.cpp | 18 ++++---- .../chain/include/eosio/chain/authority.hpp | 1 - .../include/eosio/chain/producer_schedule.hpp | 34 +++++++++++++++ libraries/fc | 2 +- 5 files changed, 60 insertions(+), 37 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index 64b7b652329..c8bdefb618f 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -199,12 +199,11 @@ namespace eosio { namespace chain { p.auth = std::move(auth); if (auto dm_logger = 
_control.get_deep_mind_logger()) { - //TODO: add formatter for custom type `permission_object` -// fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", -// ("action_id", action_id) -// ("permission_id", p.id) -// ("data", p) -// ); + fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", + ("action_id", action_id) + ("permission_id", p.id) + ("data", p) + ); } }); return perm; @@ -218,24 +217,18 @@ namespace eosio { namespace chain { _db.modify( permission, [&](permission_object& po) { auto dm_logger = _control.get_deep_mind_logger(); - fc::variant old_permission; - if (dm_logger) { - old_permission = po; - } + permission_object old_permission(po); po.auth = auth; po.last_updated = _control.pending_block_time(); if (auto dm_logger = _control.get_deep_mind_logger()) { - //TODO: add formatter for custom type `permission_object` -// fc_dlog(*dm_logger, "PERM_OP UPD ${action_id} ${permission_id} ${data}", -// ("action_id", action_id) -// ("permission_id", po.id) -// ("data", fc::mutable_variant_object() -// ("old", old_permission) -// ("new", po) -// ) -// ); + fc_dlog(*dm_logger, "PERM_OP UPD ${action_id} ${permission_id} ${old} ${new}", + ("action_id", action_id) + ("permission_id", po.id) + ("old", old_permission) + ("new", po) + ); } }); } @@ -249,12 +242,11 @@ namespace eosio { namespace chain { _db.get_mutable_index().remove_object( permission.usage_id._id ); if (auto dm_logger = _control.get_deep_mind_logger()) { - //TODO: add formatter for custom type `permission_object` -// fc_dlog(*dm_logger, "PERM_OP REM ${action_id} ${permission_id} ${data}", -// ("action_id", action_id) -// ("permission_id", permission.id) -// ("data", permission) -// ); + fc_dlog(*dm_logger, "PERM_OP REM ${action_id} ${permission_id} ${data}", + ("action_id", action_id) + ("permission_id", permission.id) + ("data", permission) + ); } _db.remove( permission ); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 
7f02471badd..fe72960dd5e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -666,11 +666,10 @@ struct controller_impl { const auto& idx = db.get_index(); for (auto& row : idx.indices()) { if (row.abi.size() != 0) { - //TODO: add formatter for custom type `shared_blob` -// fc_dlog(*dm_logger, "ABIDUMP ABI ${contract} ${abi}", -// ("contract", row.name.to_string()) -// ("abi", row.abi) -// ); + fc_dlog(*dm_logger, "ABIDUMP ABI ${contract} ${abi}", + ("contract", row.name.to_string()) + ("abi", std::string_view(row.abi.data())) + ); } } fc_dlog(*dm_logger, "ABIDUMP END"); @@ -1438,11 +1437,10 @@ struct controller_impl { { // Promote proposed schedule to pending schedule. if( !replay_head_time ) { - //TODO: add formatter for custom type `shared_producer_authority_schedule` -// ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", -// ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) -// ("lib", pbhs.dpos_irreversible_blocknum) -// ("schedule", producer_authority_schedule::from_shared(gpo.proposed_schedule) ) ); + ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) + ("lib", pbhs.dpos_irreversible_blocknum) + ("schedule", producer_authority_schedule::from_shared(gpo.proposed_schedule) ) ); } EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, diff --git a/libraries/chain/include/eosio/chain/authority.hpp b/libraries/chain/include/eosio/chain/authority.hpp index 85a7969dfba..d2cd185c7a9 100644 --- a/libraries/chain/include/eosio/chain/authority.hpp +++ b/libraries/chain/include/eosio/chain/authority.hpp @@ -360,7 +360,6 @@ namespace fmt { template auto format( const fc::array& p, FormatContext& ctx ) { - return format_to( ctx.out(), "{}", 
std::string_view(p.begin(), N)); } }; diff --git a/libraries/chain/include/eosio/chain/producer_schedule.hpp b/libraries/chain/include/eosio/chain/producer_schedule.hpp index 849ccce61ee..acc5891d3a7 100644 --- a/libraries/chain/include/eosio/chain/producer_schedule.hpp +++ b/libraries/chain/include/eosio/chain/producer_schedule.hpp @@ -325,6 +325,40 @@ namespace eosio { namespace chain { } } /// eosio::chain +namespace fmt { + template<> + struct formatter { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const eosio::chain::block_signing_authority& p, FormatContext& ctx ) { + std::visit([&]( auto&& arg ) { + using T = std::decay_t; + if constexpr ( std::is_same_v ) + fmt::formatter().format(arg, ctx); + }, p); + return format_to( ctx.out(), ""); + } + }; + + template<> + struct formatter { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const eosio::chain::shared_block_signing_authority& p, FormatContext& ctx ) { + std::visit([&]( auto&& arg ) { + using T = std::decay_t; + if constexpr ( std::is_same_v ) + fmt::formatter().format(arg, ctx); + }, p); + return format_to( ctx.out(), ""); + } + }; +} + FC_REFLECT( eosio::chain::legacy::producer_key, (producer_name)(block_signing_key) ) FC_REFLECT( eosio::chain::legacy::producer_schedule_type, (version)(producers) ) FC_REFLECT( eosio::chain::block_signing_authority_v0, (threshold)(keys)) diff --git a/libraries/fc b/libraries/fc index c8ce1833e09..ef17c48766b 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit c8ce1833e09af67a6eeba9ed4a6a6b4937eae902 +Subproject commit ef17c48766b24afc369e49c343e633f30573cd9b From 4672f214d9ab2ff652f2c169960b43d382bb55ed Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Wed, 9 Mar 2022 18:26:15 -0500 Subject: [PATCH 20/31] Add formatters for trx trace --- libraries/chain/controller.cpp | 3 +- .../include/eosio/chain/action_receipt.hpp | 17 +++++ 
libraries/chain/include/eosio/chain/trace.hpp | 46 ++++++++++++++ libraries/chain/resource_limits.cpp | 63 ++++++++----------- 4 files changed, 91 insertions(+), 38 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index fe72960dd5e..3395c26c9ac 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1734,8 +1734,7 @@ struct controller_impl { bool transaction_failed = trace && trace->except; bool transaction_can_fail = receipt.status == transaction_receipt_header::hard_fail && std::holds_alternative(receipt.trx); if( transaction_failed && !transaction_can_fail) { - //TODO: add formatter for custom type `transaction_trace` -// edump((*trace)); + edump((*trace)); throw *trace->except; } diff --git a/libraries/chain/include/eosio/chain/action_receipt.hpp b/libraries/chain/include/eosio/chain/action_receipt.hpp index dec17c8fcac..ba500748013 100644 --- a/libraries/chain/include/eosio/chain/action_receipt.hpp +++ b/libraries/chain/include/eosio/chain/action_receipt.hpp @@ -31,5 +31,22 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain +namespace fmt { + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const boost::container::flat_map& p, FormatContext& ctx ) { + for (const auto& i : p) { + fmt::formatter().format(i.first, ctx); + fmt::formatter().format(i.second, ctx); + } + return format_to( ctx.out(), ""); + } + }; +} + FC_REFLECT( eosio::chain::action_receipt, (receiver)(act_digest)(global_sequence)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence) ) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index f8e20533067..afa9992c64e 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -113,6 +113,52 @@ namespace eosio { namespace chain { } } } /// namespace eosio::chain 
+namespace fmt { + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::optional& p, FormatContext& ctx ) { + return fmt::formatter().format(*p, ctx); + } + }; + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const boost::container::flat_set& p, FormatContext& ctx ) { + for (const auto& i : p) { + fmt::formatter().format(i, ctx); + } + return format_to( ctx.out(), ""); + } + }; + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::shared_ptr& p, FormatContext& ctx ) { + return fmt::formatter().format(*p, ctx); + } + }; + template<> + struct formatter { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const fc::exception& p, FormatContext& ctx ) { + return format_to( ctx.out(), "{}", p.to_detail_string()); + } + }; +} + FC_REFLECT( eosio::chain::account_delta, (account)(delta) ) diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 5c9e91271c6..74e314a202a 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -56,10 +56,9 @@ void resource_limits_manager::initialize_database() { // see default settings in the declaration if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for custom type `resource_limits_config_object` -// fc_dlog(*dm_logger, "RLIMIT_OP CONFIG INS ${data}", -// ("data", config) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP CONFIG INS ${data}", + ("data", config) + ); } }); @@ -71,10 +70,9 @@ void resource_limits_manager::initialize_database() { state.virtual_net_limit = config.net_limit_parameters.max; if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for custom type `resource_limits_state_object` -// 
fc_dlog(*dm_logger, "RLIMIT_OP STATE INS ${data}", -// ("data", state) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP STATE INS ${data}", + ("data", state) + ); } }); } @@ -124,10 +122,9 @@ void resource_limits_manager::initialize_account(const account_name& account) { bl.owner = account; if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for custom type `resource_limits_object` -// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS INS ${data}", -// ("data", bl) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS INS ${data}", + ("data", bl) + ); } }); @@ -135,10 +132,9 @@ void resource_limits_manager::initialize_account(const account_name& account) { bu.owner = account; if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for custom type `resource_usage_object` -// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE INS ${data}", -// ("data", bu) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE INS ${data}", + ("data", bu) + ); } }); } @@ -155,10 +151,9 @@ void resource_limits_manager::set_block_parameters(const elastic_limit_parameter c.net_limit_parameters = net_limit_parameters; if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for custom type `resource_limits_config_object` -// fc_dlog(*dm_logger, "RLIMIT_OP CONFIG UPD ${data}", -// ("data", c) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP CONFIG UPD ${data}", + ("data", c) + ); } }); } @@ -191,10 +186,9 @@ void resource_limits_manager::add_transaction_usage(const flat_set bu.cpu_usage.add( cpu_usage, time_slot, config.account_cpu_usage_average_window ); if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for the custom type of bu -// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE UPD ${data}", -// ("data", bu) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE UPD ${data}", + ("data", bu) + ); } }); @@ -330,10 +324,9 @@ bool resource_limits_manager::set_account_limits( const account_name& account, i pending_limits.cpu_weight = cpu_weight; if 
(auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for custom type `resource_limits_object` -// fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS UPD ${data}", -// ("data", pending_limits) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS UPD ${data}", + ("data", pending_limits) + ); } }); @@ -403,10 +396,9 @@ void resource_limits_manager::process_account_limit_updates() { } if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for the custom type of state -// fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", -// ("data", state) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", + ("data", state) + ); } }); } @@ -426,10 +418,9 @@ void resource_limits_manager::process_block_usage(uint32_t block_num) { state.pending_net_usage = 0; if (auto dm_logger = _get_deep_mind_logger()) { - //TODO: add formatter for the custom type of state -// fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", -// ("data", state) -// ); + fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", + ("data", state) + ); } }); From 6c68328843d5272c90110b7c750f2077731898bc Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Thu, 10 Mar 2022 11:51:04 -0500 Subject: [PATCH 21/31] Add formatters for unittests --- .../testing/include/eosio/testing/tester.hpp | 18 ++++++++++++++++++ .../state_history_plugin.cpp | 6 ++---- unittests/api_tests.cpp | 5 +++-- unittests/auth_tests.cpp | 9 +++------ unittests/currency_tests.cpp | 6 ++---- unittests/delay_tests.cpp | 3 +-- unittests/producer_schedule_tests.cpp | 3 +-- unittests/whitelist_blacklist_tests.cpp | 9 +++------ 8 files changed, 33 insertions(+), 26 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index c0e0a8c4a17..c3fbc9efd85 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -767,3 +767,21 @@ namespace eosio { namespace testing { }; } } /// eosio::testing + 
+#include +namespace fmt { + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::map& p, FormatContext& ctx ) { + for (const auto& i : p) { + fmt::formatter().format(i.first, ctx); + fmt::formatter().format(i.second, ctx); + } + return format_to( ctx.out(), ""); + } + }; +} diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index d3d5edfd9cf..bc62d6b7673 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -136,8 +136,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this std::enable_if_t> operator()(T&& req) { - //TODO: add formatter for custom type `get_blocks_request_v0` - //fc_ilog(_log, "received get_blocks_request = {req}", ("req",req) ); + fc_ilog(_log, "received get_blocks_request = {req}", ("req",req) ); auto request_span = fc_create_trace("get_blocks_request"); to_send_block_num = req.start_block_num; for (auto& cp : req.have_positions) { @@ -164,8 +163,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this #include "test_cfd_transaction.hpp" +#include + #define DUMMY_ACTION_DEFAULT_A 0x45 #define DUMMY_ACTION_DEFAULT_B 0xab11cd1244556677 #define DUMMY_ACTION_DEFAULT_C 0x7451ae12 @@ -1672,8 +1674,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(more_deferred_transaction_tests, TYPE_T, backing_s auto print_deferred = [&index]() { for( const auto& gto : index ) { - //TODO: add formatter for custom type `oid` defined in chainbase.hpp -// wlog("id = ${id}, trx_id = ${trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); + wlog("id = ${id}, trx_id = ${trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); } }; diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index f3d29ac7991..0d265775d72 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -68,22 +68,19 @@ BOOST_FIXTURE_TEST_CASE( 
delegate_auth, TESTER ) { try { }); auto original_auth = static_cast(control->get_authorization_manager().get_permission({"alice"_n, config::active_name}).auth); - //TODO: add formatter for custom type `authority` defined in authority.hpp -// wdump((original_auth)); + wdump((original_auth)); set_authority( "alice"_n, config::active_name, delegated_auth ); auto new_auth = static_cast(control->get_authorization_manager().get_permission({"alice"_n, config::active_name}).auth); - //TODO: add formatter for custom type `authority` defined in authority.hpp -// wdump((new_auth)); + wdump((new_auth)); BOOST_CHECK_EQUAL((new_auth == delegated_auth), true); produce_block(); produce_block(); auto auth = static_cast(control->get_authorization_manager().get_permission({"alice"_n, config::active_name}).auth); - //TODO: add formatter for custom type `authority` defined in authority.hpp -// wdump((auth)); + wdump((auth)); BOOST_CHECK_EQUAL((new_auth == auth), true); /// execute nonce from alice signed by bob diff --git a/unittests/currency_tests.cpp b/unittests/currency_tests.cpp index 3e6becc9ca8..f82d87d4c6a 100644 --- a/unittests/currency_tests.cpp +++ b/unittests/currency_tests.cpp @@ -83,16 +83,14 @@ class currency_tester : public TESTER { ("can_recall", 0) ("can_whitelist", 0) ); - //TODO: add formatter for custom type `transaction_trace` defined in trace.hpp -// wdump((result)); + wdump((result)); result = push_action("eosio.token"_n, "issue"_n, mutable_variant_object() ("to", eosio_token) ("quantity", "1000000.0000 CUR") ("memo", "gggggggggggg") ); - //TODO: add formatter for custom type `transaction_trace` defined in trace.hpp -// wdump((result)); + wdump((result)); produce_block(); } diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index e466874e69d..f4b8b994362 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -68,8 +68,7 @@ BOOST_FIXTURE_TEST_CASE( delay_error_create_account, validating_tester) { try { ilog( 
fc::json::to_pretty_string(trx) ); auto trace = push_transaction( trx ); - //TODO: add formatter for custom type `transaction_trace` defined in trace.hpp -// edump((*trace)); + edump((*trace)); produce_blocks(6); diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index 66e5d5bd625..3d9499bf27f 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -481,8 +481,7 @@ BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { wdump((alice_last_produced_block_num)); { - //TODO: add formatter for type `flat_map` used in block_header_state.hpp -// wdump((c.control->head_block_state()->producer_to_last_produced)); + wdump((c.control->head_block_state()->producer_to_last_produced)); const auto& last_produced = c.control->head_block_state()->producer_to_last_produced; auto alice_itr = last_produced.find( "alice"_n ); BOOST_REQUIRE( alice_itr != last_produced.end() ); diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index e1ce784af15..b5383ca3c4b 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -36,8 +36,7 @@ class whitelist_blacklist_tester { cfg.contract_blacklist = contract_blacklist; cfg.action_blacklist = action_blacklist; }, !shutdown_called); - //TODO: add formatter for type `map` -// wdump((last_produced_block)); + wdump((last_produced_block)); chain->set_last_produced_block_map( last_produced_block ); if( !bootstrap ) return; @@ -60,8 +59,7 @@ class whitelist_blacklist_tester { void shutdown() { FC_ASSERT( chain, "chain is not up" ); last_produced_block = chain->get_last_produced_block_map(); - //TODO: add formatter for type `map` -// wdump((last_produced_block)); + wdump((last_produced_block)); chain.reset(); shutdown_called = true; } @@ -804,8 +802,7 @@ BOOST_AUTO_TEST_CASE( greylist_limit_tests ) { try { BOOST_REQUIRE( rm.get_virtual_block_net_limit() > (3*cfg.max_block_net_usage) ); 
BOOST_REQUIRE( rm.get_virtual_block_net_limit() < (4*cfg.max_block_net_usage) ); - //TODO: add formatter for custom type `std::pair` defined in resource_limits.hpp -// wdump((rm.get_account_net_limit_ex(user_account))); + wdump((rm.get_account_net_limit_ex(user_account))); BOOST_REQUIRE( rm.get_account_net_limit_ex(user_account).first.max > 3*reqauth_net_charge ); BOOST_REQUIRE( rm.get_account_net_limit_ex(user_account).first.max < 4*reqauth_net_charge ); From 72bae5c270488c45cf5513d449a7b2fd2c0983c5 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 14 Mar 2022 19:10:17 -0400 Subject: [PATCH 22/31] Add formatter for trx and trx trace --- .../chain/include/eosio/chain/action.hpp | 5 ++- libraries/chain/include/eosio/chain/trace.hpp | 29 +++++++++++++++ .../chain/include/eosio/chain/transaction.hpp | 35 ++++++------------- plugins/producer_plugin/producer_plugin.cpp | 28 ++++++++------- 4 files changed, 59 insertions(+), 38 deletions(-) diff --git a/libraries/chain/include/eosio/chain/action.hpp b/libraries/chain/include/eosio/chain/action.hpp index a3c6ed604ae..402f806f03b 100644 --- a/libraries/chain/include/eosio/chain/action.hpp +++ b/libraries/chain/include/eosio/chain/action.hpp @@ -68,6 +68,9 @@ namespace eosio { namespace chain { struct action : public action_base { bytes data; + size_t size; + fc::sha256 code_hash; + bytes trimmed_hex; action() = default; @@ -135,4 +138,4 @@ namespace eosio { namespace chain { FC_REFLECT( eosio::chain::permission_level, (actor)(permission) ) FC_REFLECT( eosio::chain::action_base, (account)(name)(authorization) ) -FC_REFLECT_DERIVED( eosio::chain::action, (eosio::chain::action_base), (data) ) +FC_REFLECT_DERIVED( eosio::chain::action, (eosio::chain::action_base), (data)/*(size)(code_hash)(trimmed_hex)*/ ) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index afa9992c64e..c013c2df68f 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ 
b/libraries/chain/include/eosio/chain/trace.hpp @@ -3,6 +3,7 @@ #include #include #include +#include namespace eosio { namespace chain { @@ -157,6 +158,34 @@ namespace fmt { return format_to( ctx.out(), "{}", p.to_detail_string()); } }; + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::vector& p, FormatContext& ctx ) { + auto f = fmt::formatter(); + for( auto& i : p ) { + if constexpr (std::is_same_v){ + if( i.account == eosio::chain::config::system_account_name && i.name.to_string() == "setcode" ) { + auto setcode_act = i.template data_as(); + if( setcode_act.code.size() > 0 ) { + fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); + std::memcpy(i.code_hash.data(), code_hash.data(), code_hash.data_size()); + } + } + + if (i.data.size() > 64){ + i.size = i.data.size(); + std::memcpy(i.trimmed_hex.data(), i.data.data(), 64); + } + } + f.format( i, ctx ); + } + return format_to( ctx.out(), ""); + } + }; } FC_REFLECT( eosio::chain::account_delta, diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 6c3b46fcee0..287e3d9b996 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -356,30 +356,17 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain namespace fmt { - template - struct formatter> { - template - constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } - - template - auto format( const std::vector& p, FormatContext& ctx ) { - auto f = fmt::formatter(); - for( const auto& i : p ) { f.format( i, ctx ); } - return format_to( ctx.out(), ""); - } - }; - - template - struct formatter>{ - template - constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } - - template - auto format( const std::pair& p, FormatContext& ctx ) { - 
fmt::formatter().format(p.second, ctx); - return format_to( ctx.out(), ""); - } - }; + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::pair& p, FormatContext& ctx ) { + fmt::formatter().format(p.second, ctx); + return format_to( ctx.out(), ""); + } + }; } FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) ) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 02828c283a7..b6348b2e65f 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -384,9 +384,9 @@ class producer_plugin_impl : public std::enable_shared_from_thischain_plug->get_log_trx(trx->get_transaction()).as_string())); +// ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()))); // fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", -// ("trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); +// ("trx", self->chain_plug->get_log_trx(trx->get_transaction()))); }; try { auto result = future.get(); @@ -436,11 +436,16 @@ class producer_plugin_impl : public std::enable_shared_from_thisget_log_trx(trx->packed_trx()->get_transaction()).as_string())); ("trx", trx->packed_trx()->get_transaction())); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", -// ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) -// ("entire_trace", get_trace(response).as_string())); + if (std::holds_alternative(response)){ + fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) + ("entire_trace", *std::get(response))); + } else { + 
fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", + ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) + ("entire_trace", *std::get(response))); + } } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid}, auth: ${a} : ${why} ", ("txid", trx->id()) @@ -1478,8 +1483,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if (num_relevant_signatures == 0) { - //TODO: add formatter for custom type block_signing_authority_v0 -// elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; } else if ( _pause_production ) { elog("Not producing block because production is explicitly paused"); @@ -1595,9 +1599,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } std::swap( features_to_activate, protocol_features_to_activate ); _protocol_features_signaled = true; - //TODO: add formatter for custom type `vector` -// ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", -// ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); + ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } } @@ -1609,8 +1612,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const fc::time_point preprocess_deadline = 
calculate_block_deadline(block_time); if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - //TODO: add formatter for custom type block_signing_authority_v0 -// elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } From aeceeb4f364481af603155739458289cbb488f27 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 15 Mar 2022 00:31:13 -0400 Subject: [PATCH 23/31] Revert "Add formatter for trx and trx trace" that breaks consensus after adding members to action This reverts commit 72bae5c270488c45cf5513d449a7b2fd2c0983c5. 
--- .../chain/include/eosio/chain/action.hpp | 5 +-- libraries/chain/include/eosio/chain/trace.hpp | 29 --------------- .../chain/include/eosio/chain/transaction.hpp | 35 +++++++++++++------ plugins/producer_plugin/producer_plugin.cpp | 28 +++++++-------- 4 files changed, 38 insertions(+), 59 deletions(-) diff --git a/libraries/chain/include/eosio/chain/action.hpp b/libraries/chain/include/eosio/chain/action.hpp index 402f806f03b..a3c6ed604ae 100644 --- a/libraries/chain/include/eosio/chain/action.hpp +++ b/libraries/chain/include/eosio/chain/action.hpp @@ -68,9 +68,6 @@ namespace eosio { namespace chain { struct action : public action_base { bytes data; - size_t size; - fc::sha256 code_hash; - bytes trimmed_hex; action() = default; @@ -138,4 +135,4 @@ namespace eosio { namespace chain { FC_REFLECT( eosio::chain::permission_level, (actor)(permission) ) FC_REFLECT( eosio::chain::action_base, (account)(name)(authorization) ) -FC_REFLECT_DERIVED( eosio::chain::action, (eosio::chain::action_base), (data)/*(size)(code_hash)(trimmed_hex)*/ ) +FC_REFLECT_DERIVED( eosio::chain::action, (eosio::chain::action_base), (data) ) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index c013c2df68f..afa9992c64e 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -3,7 +3,6 @@ #include #include #include -#include namespace eosio { namespace chain { @@ -158,34 +157,6 @@ namespace fmt { return format_to( ctx.out(), "{}", p.to_detail_string()); } }; - template - struct formatter> { - template - constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } - - template - auto format( const std::vector& p, FormatContext& ctx ) { - auto f = fmt::formatter(); - for( auto& i : p ) { - if constexpr (std::is_same_v){ - if( i.account == eosio::chain::config::system_account_name && i.name.to_string() == "setcode" ) { - auto setcode_act = i.template data_as(); - if( 
setcode_act.code.size() > 0 ) { - fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); - std::memcpy(i.code_hash.data(), code_hash.data(), code_hash.data_size()); - } - } - - if (i.data.size() > 64){ - i.size = i.data.size(); - std::memcpy(i.trimmed_hex.data(), i.data.data(), 64); - } - } - f.format( i, ctx ); - } - return format_to( ctx.out(), ""); - } - }; } FC_REFLECT( eosio::chain::account_delta, diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 287e3d9b996..6c3b46fcee0 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -356,17 +356,30 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain namespace fmt { - template - struct formatter> { - template - constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } - - template - auto format( const std::pair& p, FormatContext& ctx ) { - fmt::formatter().format(p.second, ctx); - return format_to( ctx.out(), ""); - } - }; + template + struct formatter> { + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::vector& p, FormatContext& ctx ) { + auto f = fmt::formatter(); + for( const auto& i : p ) { f.format( i, ctx ); } + return format_to( ctx.out(), ""); + } + }; + + template + struct formatter>{ + template + constexpr auto parse( ParseContext& ctx ) { return ctx.begin(); } + + template + auto format( const std::pair& p, FormatContext& ctx ) { + fmt::formatter().format(p.second, ctx); + return format_to( ctx.out(), ""); + } + }; } FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) ) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index b6348b2e65f..02828c283a7 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -384,9 +384,9 @@ class producer_plugin_impl : public std::enable_shared_from_thischain_plug->get_log_trx(trx->get_transaction()))); +// ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); // fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", -// ("trx", self->chain_plug->get_log_trx(trx->get_transaction()))); +// ("trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); }; try { auto result = future.get(); @@ -436,16 +436,11 @@ class producer_plugin_impl : public std::enable_shared_from_thisget_log_trx(trx->packed_trx()->get_transaction()).as_string())); ("trx", trx->packed_trx()->get_transaction())); - if (std::holds_alternative(response)){ - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) - ("entire_trace", *std::get(response))); - } else { - fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", - ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) - ("entire_trace", *std::get(response))); - } +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", +// ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) +// ("entire_trace", get_trace(response).as_string())); } else { fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid}, auth: ${a} : ${why} ", ("txid", trx->id()) @@ -1483,7 +1478,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if 
(num_relevant_signatures == 0) { - elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + //TODO: add formatter for custom type block_signing_authority_v0 +// elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; } else if ( _pause_production ) { elog("Not producing block because production is explicitly paused"); @@ -1599,8 +1595,9 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } std::swap( features_to_activate, protocol_features_to_activate ); _protocol_features_signaled = true; - ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", - ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); + //TODO: add formatter for custom type `vector` +// ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", +// ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } } @@ -1612,7 +1609,8 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const fc::time_point preprocess_deadline = calculate_block_deadline(block_time); if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + //TODO: add formatter for custom type block_signing_authority_v0 +// elog("Unexpected block signing authority, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } From 04c67bc826ef476c88e0d87da910f4f55c92428e Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Wed, 16 Mar 2022 11:44:53 -0400 Subject: [PATCH 24/31] Handle corner cases in custom formatters and remove extra quotes from the auto formatter --- libraries/chain/include/eosio/chain/authority.hpp | 3 +++ libraries/chain/include/eosio/chain/trace.hpp | 10 ++++++++-- libraries/chain/include/eosio/chain/transaction.hpp | 3 +++ libraries/fc | 2 +- plugins/producer_plugin/producer_plugin.cpp | 11 ++++------- 5 files changed, 19 insertions(+), 10 deletions(-) diff --git a/libraries/chain/include/eosio/chain/authority.hpp b/libraries/chain/include/eosio/chain/authority.hpp index d2cd185c7a9..96e3d12deff 100644 --- a/libraries/chain/include/eosio/chain/authority.hpp +++ b/libraries/chain/include/eosio/chain/authority.hpp @@ -331,6 +331,9 @@ namespace fmt { template auto format( const eosio::chain::shared_vector& p, FormatContext& ctx ) { + if ( p.size() == 0) + return format_to( ctx.out(), "{}", "null"); + auto f = fmt::formatter(); for( const auto& i : p ) { f.format( i, ctx ); } return format_to( ctx.out(), ""); diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index afa9992c64e..ce92e1f5e8d 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -121,7 +121,10 @@ namespace fmt { template auto format( const std::optional& p, FormatContext& ctx ) { - return fmt::formatter().format(*p, ctx); + if (p.has_value()) + return fmt::formatter().format(*p, ctx); + else + return format_to( ctx.out(), ""); } }; template @@ -144,7 +147,10 @@ namespace fmt { template auto format( const std::shared_ptr& p, FormatContext& ctx ) { - return fmt::formatter().format(*p, ctx); + if (p) + 
return fmt::formatter().format(*p, ctx); + else + return format_to( ctx.out(), "{}", "null"); } }; template<> diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index 6c3b46fcee0..e50fdcccd35 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -363,6 +363,9 @@ namespace fmt { template auto format( const std::vector& p, FormatContext& ctx ) { + if ( p.size() == 0) + return format_to( ctx.out(), "{}", "null"); + auto f = fmt::formatter(); for( const auto& i : p ) { f.format( i, ctx ); } return format_to( ctx.out(), ""); diff --git a/libraries/fc b/libraries/fc index ef17c48766b..9f87c6a3c43 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit ef17c48766b24afc369e49c343e633f30573cd9b +Subproject commit 9f87c6a3c43bd58f7590851762656239310d21d9 diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 02828c283a7..c4025d6c161 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1478,8 +1478,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if (num_relevant_signatures == 0) { - //TODO: add formatter for custom type block_signing_authority_v0 -// elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; } else if ( _pause_production ) { elog("Not producing block because production is explicitly paused"); @@ -1595,9 +1594,8 @@ 
producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } std::swap( features_to_activate, protocol_features_to_activate ); _protocol_features_signaled = true; - //TODO: add formatter for custom type `vector` -// ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", -// ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); + ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } } @@ -1609,8 +1607,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const fc::time_point preprocess_deadline = calculate_block_deadline(block_time); if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - //TODO: add formatter for custom type block_signing_authority_v0 -// elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + elog("Unexpected block signing authority, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } From 602807eaaa55c588e8c99419638796809bd99153 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Sun, 20 Mar 2022 02:40:50 -0400 Subject: [PATCH 25/31] Convert a transaction into a string --- libraries/chain/abi_serializer.cpp | 8 ++ .../include/eosio/chain/abi_serializer.hpp | 2 + plugins/chain_plugin/chain_plugin.cpp | 80 +++++++++++++++++++ .../eosio/chain_plugin/chain_plugin.hpp | 6 ++ plugins/producer_plugin/producer_plugin.cpp | 4 + 5 files changed, 100 insertions(+) diff --git a/libraries/chain/abi_serializer.cpp b/libraries/chain/abi_serializer.cpp index 82113c3a69d..94751df58c2 100644 --- a/libraries/chain/abi_serializer.cpp +++ b/libraries/chain/abi_serializer.cpp @@ -469,6 +469,14 @@ namespace eosio { namespace chain { return _binary_to_variant(type, binary, ctx); } + fc::variant abi_serializer::binary_to_log_variant( const std::string_view& type, const bytes& binary, const yield_function_t& yield, bool short_path )const { + impl::binary_to_variant_context ctx(*this, yield, type); + ctx.logging(); + ctx.short_path = short_path; + return _binary_to_variant(type, binary, ctx); + } + + void abi_serializer::_variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, impl::variant_to_binary_context& ctx )const { try { auto h = ctx.enter_scope(); diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index bace9db528b..b91f2aa9ebc 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -64,6 +64,8 @@ struct abi_serializer { fc::variant binary_to_variant( const std::string_view& type, const bytes& binary, const yield_function_t& yield, bool short_path = false )const; fc::variant 
binary_to_variant( const std::string_view& type, fc::datastream& binary, const yield_function_t& yield, bool short_path = false )const; + fc::variant binary_to_log_variant( const std::string_view& type, const bytes& binary, const yield_function_t& yield, bool short_path = false )const; + bytes variant_to_binary( const std::string_view& type, const fc::variant& var, const yield_function_t& yield, bool short_path = false )const; void variant_to_binary( const std::string_view& type, const fc::variant& var, fc::datastream& ds, const yield_function_t& yield, bool short_path = false )const; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 36fa3a696b1..6a34af0d34f 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -35,6 +35,8 @@ #include #include +#include + // reflect chainbase::environment for --print-build-info option FC_REFLECT_ENUM( chainbase::environment::os_t, (OS_LINUX)(OS_MACOS)(OS_WINDOWS)(OS_OTHER) ) @@ -3397,6 +3399,84 @@ fc::variant chain_plugin::get_log_trx(const transaction& trx) const { return pretty_output; } +void chain_plugin::to_trimmed_string(string& result, const action& a) const { + result += "\"account\":\"" + a.account.to_string() + "\"," + + "\"name\":\"" + a.name.to_string() + "\","; + to_trimmed_vector_string(result, "authorization", a.authorization); + result += ","; + + if( a.account == config::system_account_name && a.name == "setcode"_n ) { + auto setcode_act = a.data_as(); + if( setcode_act.code.size() > 0 ) { + result += "\"code_hash\":"; + fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); + result += "\"" + code_hash.str() + "\","; + } + } + + result += "\"data\":"; + abi_serializer::yield_function_t yield = abi_serializer::create_yield_function(my->chain->get_abi_serializer_max_time()); + auto abi = my->chain->get_abi_serializer(a.account, yield); + fc::variant output; + if (abi) { + auto 
type = abi->get_action_type(a.name); + if (!type.empty()) { + try { + output = abi->binary_to_log_variant(type, a.data, yield); + } catch (...) { + // any failure to serialize data, then leave as not serialized + } + result += fc::json::to_string(output, fc::time_point::maximum()); + } + } +} + +void chain_plugin::to_trimmed_string(string& result, const permission_level& perm) const { + result += "\"actor\":\"" + perm.actor.to_string() + "\"," + + "\"permission\":\"" + perm.permission.to_string() + "\""; +} + +void chain_plugin::to_trimmed_string(string& result, const std::pair>& p) const { + result += "\"key\":" + std::to_string(p.first) + "," + + "\"value\":" + std::string(p.second.begin(), p.second.end()); +} + +template +void chain_plugin::to_trimmed_vector_string(string& result, const char* name, const vector& vec) const { + result = result + "\"" + name + "\":["; + for (const auto& v : vec) { + result += "{"; + to_trimmed_string(result, v); + + result += "},"; + } + if (!vec.empty()) + result.pop_back(); //remove the last `,` + result += "]"; +} + +std::string chain_plugin::to_trimmed_trx_string(const transaction& t) { + static_assert(fc::reflector::total_member_count == 9); + + string result = "{"; + result += "\"expiration\":\"" + (std::string)t.expiration + "\"," + + "\"ref_block_num\":" + std::to_string(t.ref_block_num) + "," + + "\"ref_block_prefix\":" + std::to_string(t.ref_block_prefix) + "," + + "\"max_net_usage_words\":" + std::to_string(t.max_net_usage_words.value) + "," + + "\"max_cpu_usage_ms\":" + std::to_string(t.max_cpu_usage_ms) + "," + + "\"delay_sec\":" + std::to_string(t.delay_sec.value) + ","; + + to_trimmed_vector_string(result, "context_free_actions", t.context_free_actions); + result += ","; + to_trimmed_vector_string(result, "actions", t.actions); + result += ","; + to_trimmed_vector_string(result, "transaction_extensions", t.transaction_extensions); + + result += "}"; + return result; +} + + } // namespace eosio FC_REFLECT( 
eosio::chain_apis::detail::ram_market_exchange_state_t, (ignore1)(ignore2)(ignore3)(core_symbol)(ignore4) ) diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index d1da878b010..a11e4859634 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -1092,6 +1092,12 @@ class chain_plugin : public plugin { // return variant of trx for logging, trace is modified to minimize log output fc::variant get_log_trx(const transaction& trx) const; + void to_trimmed_string(string& result, const eosio::chain::action& a)const; + void to_trimmed_string(string& result, const eosio::chain::permission_level& perm)const; + void to_trimmed_string(string& result, const std::pair>& p)const; + template void to_trimmed_vector_string(string& result, const char* name, const vector& vec)const; + std::string to_trimmed_trx_string(const transaction& t); + private: static void log_guard_exception(const chain::guard_exception& e); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index c4025d6c161..257e054a687 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -465,6 +465,10 @@ class producer_plugin_impl : public std::enable_shared_from_thisset_level(spdlog::level::debug); + fc_dlog(_trx_log, "[TRX_TRACE] tx: ${trx}", + ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction()))); } else { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}, auth: ${a}", ("txid", trx->id()) From 2fbf9502a71ab876c94d3f4cf86e2606939ec139 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Sun, 20 Mar 2022 13:29:59 -0400 Subject: [PATCH 26/31] Update trx to string conversion --- plugins/chain_plugin/chain_plugin.cpp | 118 ++++++++++-------- 
.../eosio/chain_plugin/chain_plugin.hpp | 6 +- plugins/producer_plugin/producer_plugin.cpp | 2 +- 3 files changed, 70 insertions(+), 56 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 6a34af0d34f..89c06a2dcd8 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -3399,63 +3399,81 @@ fc::variant chain_plugin::get_log_trx(const transaction& trx) const { return pretty_output; } -void chain_plugin::to_trimmed_string(string& result, const action& a) const { - result += "\"account\":\"" + a.account.to_string() + "\"," - + "\"name\":\"" + a.name.to_string() + "\","; - to_trimmed_vector_string(result, "authorization", a.authorization); - result += ","; - - if( a.account == config::system_account_name && a.name == "setcode"_n ) { - auto setcode_act = a.data_as(); - if( setcode_act.code.size() > 0 ) { - result += "\"code_hash\":"; - fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); - result += "\"" + code_hash.str() + "\","; +namespace { + template void to_trimmed_vector_string(string& result, const char* name, const vector& vec, const controller& chain); + + void to_trimmed_string(string& result, const action& a, const controller& chain) { + result += "\"account\":\"" + a.account.to_string() + "\"," + + "\"name\":\"" + a.name.to_string() + "\","; + to_trimmed_vector_string(result, "authorization", a.authorization, chain); + result += ","; + + if( a.account == config::system_account_name && a.name == "setcode"_n ) { + auto setcode_act = a.data_as(); + if( setcode_act.code.size() > 0 ) { + result += "\"code_hash\":"; + fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); + result += "\"" + code_hash.str() + "\","; + } } - } - result += "\"data\":"; - abi_serializer::yield_function_t yield = abi_serializer::create_yield_function(my->chain->get_abi_serializer_max_time()); - 
auto abi = my->chain->get_abi_serializer(a.account, yield); - fc::variant output; - if (abi) { - auto type = abi->get_action_type(a.name); - if (!type.empty()) { - try { - output = abi->binary_to_log_variant(type, a.data, yield); - } catch (...) { - // any failure to serialize data, then leave as not serialized + result += "\"data\":"; + abi_serializer::yield_function_t yield = abi_serializer::create_yield_function(chain.get_abi_serializer_max_time()); + auto abi = chain.get_abi_serializer(a.account, yield); + fc::variant output; + if (abi) { + auto type = abi->get_action_type(a.name); + if (!type.empty()) { + try { + output = abi->binary_to_log_variant(type, a.data, yield); + result += fc::json::to_string(output, fc::time_point::maximum()); + result += ","; + result += "\"hex_data\":{"; + } catch (...) { + // any failure to serialize data, then leave as not serialized + result += "{"; + } + } else { + result += "{"; } - result += fc::json::to_string(output, fc::time_point::maximum()); + } else { + result += "{"; } - } -} -void chain_plugin::to_trimmed_string(string& result, const permission_level& perm) const { - result += "\"actor\":\"" + perm.actor.to_string() + "\"," - + "\"permission\":\"" + perm.permission.to_string() + "\""; -} + result += "\"size\":" + std::to_string(a.data.size()) + ","; + if( a.data.size() > impl::hex_log_max_size ) { + result += "\"trimmed_hex\":\"" + fc::to_hex(std::vector(a.data.begin(), a.data.begin() + impl::hex_log_max_size)) + "\""; + } else { + result += "\"hex\":\"" + fc::to_hex(a.data) + "\""; + } + result += "}"; + } -void chain_plugin::to_trimmed_string(string& result, const std::pair>& p) const { - result += "\"key\":" + std::to_string(p.first) + "," - + "\"value\":" + std::string(p.second.begin(), p.second.end()); -} + void to_trimmed_string(string& result, const permission_level& perm, const controller& chain) { + result += "\"actor\":\"" + perm.actor.to_string() + "\"," + + "\"permission\":\"" + perm.permission.to_string() 
+ "\""; + } -template -void chain_plugin::to_trimmed_vector_string(string& result, const char* name, const vector& vec) const { - result = result + "\"" + name + "\":["; - for (const auto& v : vec) { - result += "{"; - to_trimmed_string(result, v); + void to_trimmed_string(string& result, const std::pair>& p, const controller& chain) { + result += "\"key\":" + std::to_string(p.first) + "," + + "\"value\":" + std::string(p.second.begin(), p.second.end()); + } - result += "},"; + template + void to_trimmed_vector_string(string& result, const char* name, const vector& vec, const controller& chain) { + result = result + "\"" + name + "\":["; + for (const auto& v : vec) { + result += "{"; + to_trimmed_string(result, v, chain); + result += "},"; + } + if (!vec.empty()) + result.pop_back(); //remove the last `,` + result += "]"; } - if (!vec.empty()) - result.pop_back(); //remove the last `,` - result += "]"; -} +} // namespace -std::string chain_plugin::to_trimmed_trx_string(const transaction& t) { +std::string chain_plugin::to_trimmed_trx_string(const transaction& t, const controller& chain) const { static_assert(fc::reflector::total_member_count == 9); string result = "{"; @@ -3466,11 +3484,11 @@ std::string chain_plugin::to_trimmed_trx_string(const transaction& t) { + "\"max_cpu_usage_ms\":" + std::to_string(t.max_cpu_usage_ms) + "," + "\"delay_sec\":" + std::to_string(t.delay_sec.value) + ","; - to_trimmed_vector_string(result, "context_free_actions", t.context_free_actions); + to_trimmed_vector_string(result, "context_free_actions", t.context_free_actions, chain); result += ","; - to_trimmed_vector_string(result, "actions", t.actions); + to_trimmed_vector_string(result, "actions", t.actions, chain); result += ","; - to_trimmed_vector_string(result, "transaction_extensions", t.transaction_extensions); + to_trimmed_vector_string(result, "transaction_extensions", t.transaction_extensions, chain); result += "}"; return result; diff --git 
a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index a11e4859634..20a978f5f5a 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -1092,11 +1092,7 @@ class chain_plugin : public plugin { // return variant of trx for logging, trace is modified to minimize log output fc::variant get_log_trx(const transaction& trx) const; - void to_trimmed_string(string& result, const eosio::chain::action& a)const; - void to_trimmed_string(string& result, const eosio::chain::permission_level& perm)const; - void to_trimmed_string(string& result, const std::pair>& p)const; - template void to_trimmed_vector_string(string& result, const char* name, const vector& vec)const; - std::string to_trimmed_trx_string(const transaction& t); + std::string to_trimmed_trx_string(const transaction& t, const controller& chain) const; private: static void log_guard_exception(const chain::guard_exception& e); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 257e054a687..afb76b5bdd3 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -468,7 +468,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisset_level(spdlog::level::debug); fc_dlog(_trx_log, "[TRX_TRACE] tx: ${trx}", - ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction()))); + ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction(), chain))); } else { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}, auth: ${a}", ("txid", trx->id()) From d9f2669a3463f4f9e6b992dbfa87789f151065d3 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Mon, 21 Mar 2022 00:59:11 -0400 Subject: [PATCH 27/31] Convert trx trace into log string --- 
libraries/chain/include/eosio/chain/trace.hpp | 4 + libraries/chain/trace.cpp | 206 ++++++++++++++++++ plugins/chain_plugin/chain_plugin.cpp | 7 + .../eosio/chain_plugin/chain_plugin.hpp | 1 + plugins/producer_plugin/producer_plugin.cpp | 20 +- 5 files changed, 232 insertions(+), 6 deletions(-) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index ce92e1f5e8d..8527c57be70 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -111,6 +111,10 @@ namespace eosio { namespace chain { inline storage_usage_trace generic_storage_usage_trace(uint32_t action_id) { return {action_id}; } + namespace trace { + void to_trimmed_trace_string(string& result, const transaction_trace& t, const controller& chain); + } + } } /// namespace eosio::chain namespace fmt { diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp index 8c21ee92ff4..02a9712d04d 100644 --- a/libraries/chain/trace.cpp +++ b/libraries/chain/trace.cpp @@ -1,5 +1,10 @@ #include #include +#include +#include +#include +#include + namespace eosio { namespace chain { @@ -35,4 +40,205 @@ action_trace::action_trace( ,producer_block_id( trace.producer_block_id ) {} +namespace trace { + template + void to_trimmed_trace_container_string(string& result, const char* name, const Container& vec, const controller& chain); + + void to_trimmed_string(string& result, const action_trace& at, const controller& chain) { + + result += "\"action_ordinal\":" + std::to_string(at.action_ordinal) + "," + + "\"creator_action_ordinal\":" + std::to_string(at.creator_action_ordinal) + "," + + "\"closest_unnotified_ancestor_action_ordinal\":" + std::to_string(at.closest_unnotified_ancestor_action_ordinal) + ","; + if (at.receipt.has_value()) { + result += "\"receipt\":{"; + result += "\"receiver\":\"" + at.receipt->receiver.to_string() + "\"" + "," + + "\"act_digest\":\"" + at.receipt->act_digest.str() + "\"" + "," + + 
"\"global_sequence\":" + std::to_string(at.receipt->global_sequence) + "," + + "\"recv_sequence\":" + std::to_string(at.receipt->recv_sequence) + ","; + result += "\"auth_sequence\":["; + auto itr = at.receipt->auth_sequence.find(at.receipt->receiver); + if (itr != at.receipt->auth_sequence.end()){ + result += "[\"" + itr->first.to_string() + "\"," + std::to_string(itr->second) + "]"; + } else { + result += "[]"; + } + result += "],"; + result += "\"code_sequence\":" + std::to_string(at.receipt->code_sequence) + "," + + "\"abi_sequence\":" + std::to_string(at.receipt->abi_sequence); + result += "}"; + } else { + result += "null"; + } + result += ","; + result += "\"receiver\":\"" + at.receiver.to_string() + "\"" + ","; + + // action trace + auto a = at.act; + result += "\"act\":{"; //act begin + result += "\"account\":\"" + a.account.to_string() + "\"," + + "\"name\":\"" + a.name.to_string() + "\","; + to_trimmed_trace_container_string(result, "authorization", a.authorization, chain); + result += ","; + + if( a.account == config::system_account_name && a.name == "setcode"_n ) { + auto setcode_act = a.data_as(); + if( setcode_act.code.size() > 0 ) { + result += "\"code_hash\":"; + fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); + result += "\"" + code_hash.str() + "\","; + } + } + + result += "\"data\":"; + abi_serializer::yield_function_t yield = abi_serializer::create_yield_function(chain.get_abi_serializer_max_time()); + auto abi = chain.get_abi_serializer(a.account, yield); + fc::variant output; + if (abi) { + auto type = abi->get_action_type(a.name); + if (!type.empty()) { + try { + output = abi->binary_to_log_variant(type, a.data, yield); + result += fc::json::to_string(output, fc::time_point::maximum()); + result += ","; + result += "\"hex_data\":{"; + } catch (...) 
{ + // any failure to serialize data, then leave as not serialized + result += "{"; + } + } else { + result += "{"; + } + } else { + result += "{"; + } + + result += "\"size\":" + std::to_string(a.data.size()) + ","; + if( a.data.size() > impl::hex_log_max_size ) { + result += "\"trimmed_hex\":\"" + fc::to_hex(std::vector(a.data.begin(), a.data.begin() + impl::hex_log_max_size)) + "\""; + } else { + result += "\"hex\":\"" + fc::to_hex(a.data) + "\""; + } + result += "}"; + result += "}"; //act end + result += ","; + // action trace end + + result = result + "\"context_free\":" + (at.context_free ? "true" : "false") + ","; + result += "\"elapsed\":" + std::to_string(at.elapsed.count()) + "," + + "\"console\":\"" + at.console + "\"," + + "\"trx_id\":\"" + at.trx_id.str() + "\"," + + "\"block_num\":" + std::to_string(at.block_num) + "," + + "\"block_time\":\"" + (std::string)at.block_time.to_time_point() + "\"," + + "\"producer_block_id\":"; + if (at.producer_block_id.has_value()) { + result += "\"" + at.producer_block_id->str() + "\""; + } else { + result += "null"; + } + result += ","; + + // account_ram_deltas + to_trimmed_trace_container_string(result, "account_ram_deltas", at.account_ram_deltas, chain); + result += ","; + to_trimmed_trace_container_string(result, "account_disk_deltas", at.account_disk_deltas, chain); + result += ","; + + result += "\"except\":"; + if (at.except.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + result += "\"error_code\":"; + if (at.error_code.has_value()) { + ;//TODO... 
+ } else { + result += "null"; + } + result += ","; + + result += "\"return_value\":\"" + std::string(at.return_value.begin(), at.return_value.end()) + "\""; + } + + void to_trimmed_string(string& result, const account_delta& ad, const controller& chain) { + result += "\"account\":\"" + ad.account.to_string() + "\"," + + "\"delta\":\"" + std::to_string(ad.delta) + "\""; + } + + void to_trimmed_string(string& result, const permission_level& perm, const controller& chain) { + result += "\"actor\":\"" + perm.actor.to_string() + "\"," + + "\"permission\":\"" + perm.permission.to_string() + "\""; + } + + template + void to_trimmed_trace_container_string(string& result, const char* name, const Container& vec, const controller& chain) { + result = result + "\"" + name + "\":["; + for (const auto& v : vec) { + result += "{"; + to_trimmed_string(result, v, chain); + result += "},"; + } + if (!vec.empty()) + result.pop_back(); //remove the last `,` + result += "]"; + } + + void to_trimmed_trace_string(string& result, const transaction_trace& t, const controller& chain) { + result = "{"; + result += "\"id\":\"" + (std::string)t.id + "\"," + + "\"block_num\":" + std::to_string(t.block_num) + "," + + "\"block_time\":\"" + (std::string)t.block_time.to_time_point() + "\"" + "," + + "\"producer_block_id\":" + ( t.producer_block_id.has_value() ? (std::string)t.producer_block_id.value() : "null" ) + ","; + if (t.receipt.has_value()) { + result += "\"receipt\":{"; + result += "\"status\":\"" + (std::string)t.receipt->status + "\"" + "," + + "\"cpu_usage_us\":" + std::to_string(t.receipt->cpu_usage_us) + "," + + "\"net_usage_words\":" + std::to_string(t.receipt->net_usage_words); + result += "}"; + } else{ + result += "null"; + } + result += ","; + result += "\"elapsed\":" + std::to_string(t.elapsed.count()) + "," + + "\"net_usage\":" + std::to_string(t.net_usage) + "," + + "\"scheduled\":" + (t.scheduled ? 
"true" : "false") + ","; + + // action_trace + to_trimmed_trace_container_string(result, "action_traces", t.action_traces, chain); + result += ","; + + result += "\"account_ram_delta\":"; + if (t.account_ram_delta.has_value()) { + result += "null"; // TODO... + } else { + result += "null"; + } + result += ","; + + result += "\"failed_dtrx_trace\":"; + result += "null"; // TODO... + result += ","; + + result += "\"except\":"; + if (t.except.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + result += "\"error_code\":"; + if (t.error_code.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + result += "\"except_ptr\":"; + result += "null"; //TODO... + + result += "}"; + } +} + } } // eosio::chain diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 89c06a2dcd8..1583cbc41b5 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -3494,6 +3494,13 @@ std::string chain_plugin::to_trimmed_trx_string(const transaction& t, const cont return result; } +std::string chain_plugin::get_log_trx_trace(const transaction_trace_ptr& t, const controller& chain) const { + static_assert( fc::reflector::total_member_count == 13); + string result; + eosio::chain::trace::to_trimmed_trace_string(result, *t, chain); + return result; +} + } // namespace eosio diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 20a978f5f5a..524c11360bb 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -1093,6 +1093,7 @@ class chain_plugin : public plugin { fc::variant get_log_trx(const transaction& trx) const; std::string to_trimmed_trx_string(const transaction& t, const controller& chain) const; + std::string get_log_trx_trace(const chain::transaction_trace_ptr& t, 
const controller& chain) const; private: static void log_guard_exception(const chain::guard_exception& e); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index afb76b5bdd3..81a57ca2ac2 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -418,11 +418,15 @@ class producer_plugin_impl : public std::enable_shared_from_this(except_ptr, trx)); - auto get_trace = [&](const std::variant& response) -> fc::variant { +// auto get_trace = [&](const std::variant& response) -> fc::variant { + auto get_trace = [&](const std::variant& response) -> string { if (std::holds_alternative(response)) { - return fc::variant{std::get(response)}; +// return fc::variant{std::get(response)}; + //return std::get(response); + return ""; // TODO... } else { - return chain_plug->get_log_trx_trace( std::get(response) ); + //return chain_plug->get_log_trx_trace( std::get(response) ); + return chain_plug->get_log_trx_trace(std::get(response), chain); } }; @@ -466,9 +470,13 @@ class producer_plugin_impl : public std::enable_shared_from_thisset_level(spdlog::level::debug); - fc_dlog(_trx_log, "[TRX_TRACE] tx: ${trx}", - ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction(), chain))); +// _trx_log.get_agent_logger()->set_level(spdlog::level::debug); +// fc_dlog(_trx_log, "[TRX_TRACE] tx: ${trx}", +// ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction(), chain))); + _trx_trace_success_log.get_agent_logger()->set_level(spdlog::level::debug); + fc_dlog(_trx_trace_success_log, "[TRX_TRACE] tx: ${entire_trace}", + ("entire_trace", get_trace(response))); + } else { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}, auth: ${a}", ("txid", trx->id()) From c98413e03cd522a1ad9666f929f35186837091b4 Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 22 Mar 2022 02:00:59 -0400 Subject: [PATCH 28/31] Remove 
dollar sign from new log format --- .../amqp/include/eosio/amqp/amqp_handler.hpp | 40 ++-- libraries/amqp/reliable_amqp_publisher.cpp | 4 +- libraries/amqp/retrying_amqp_connection.cpp | 20 +- libraries/chain/apply_context.cpp | 18 +- libraries/chain/authorization_manager.cpp | 8 +- libraries/chain/backing_store/db_context.cpp | 10 +- libraries/chain/block_log.cpp | 52 ++--- libraries/chain/controller.cpp | 76 +++---- libraries/chain/fork_database.cpp | 4 +- .../backing_store/kv_context_chainbase.hpp | 6 +- .../backing_store/kv_context_rocksdb.hpp | 6 +- .../chain/include/eosio/chain/log_catalog.hpp | 6 +- libraries/chain/include/eosio/chain/trace.hpp | 4 + libraries/chain/protocol_feature_manager.cpp | 2 +- libraries/chain/resource_limits.cpp | 20 +- libraries/chain/trace.cpp | 206 ++++++++++++++++++ libraries/chain/transaction_context.cpp | 4 +- .../runtimes/eos-vm-oc/code_cache.cpp | 4 +- .../include/b1/session/undo_stack.hpp | 2 +- libraries/fc | 2 +- .../rodeos/include/b1/rodeos/wasm_ql.hpp | 8 +- libraries/rodeos/rodeos.cpp | 18 +- libraries/rodeos/wasm_ql.cpp | 22 +- libraries/state_history/log.cpp | 30 +-- libraries/tpm-helpers/tpm-helpers.cpp | 2 +- .../amqp_trace_plugin_impl.cpp | 10 +- plugins/amqp_trx_plugin/amqp_trx_plugin.cpp | 14 +- .../fifo_trx_processing_queue.hpp | 4 +- plugins/amqp_trx_plugin/test/test_ordered.cpp | 2 +- .../test/test_ordered_full.cpp | 4 +- plugins/chain_plugin/account_query_db.cpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 59 ++--- .../eosio/chain_plugin/chain_plugin.hpp | 1 + .../http_client_plugin/http_client_plugin.cpp | 8 +- plugins/http_plugin/http_plugin.cpp | 66 +++--- plugins/net_plugin/net_plugin.cpp | 152 ++++++------- .../producer_plugin/subjective_billing.hpp | 2 +- plugins/producer_plugin/producer_plugin.cpp | 178 +++++++-------- .../producer_plugin/test/test_trx_full.cpp | 6 +- .../file_space_handler.hpp | 16 +- .../resource_monitor_plugin.cpp | 8 +- .../state_history_plugin.cpp | 28 +-- 
.../test_control_plugin.cpp | 10 +- plugins/trace_api_plugin/store_provider.cpp | 2 +- plugins/trace_api_plugin/trace_api_plugin.cpp | 4 +- .../txn_test_gen_plugin.cpp | 6 +- plugins/wallet_plugin/wallet.cpp | 6 +- programs/cleos/main.cpp | 14 +- programs/cleos_tpm/main.cpp | 16 +- programs/eosio-blocklog/main.cpp | 12 +- programs/eosio-tester/main.cpp | 10 +- programs/eosio-tpmattestcheck/main.cpp | 2 +- programs/eosio-tpmtool/main.cpp | 2 +- programs/keosd/main.cpp | 16 +- programs/nodeos-sectl/main.cpp | 4 +- programs/nodeos/main.cpp | 26 +-- programs/rodeos/cloner_plugin.cpp | 14 +- programs/rodeos/main.cpp | 26 +-- programs/rodeos/rocksdb_plugin.cpp | 2 +- programs/rodeos/ship_client.hpp | 12 +- programs/rodeos/streamer_plugin.cpp | 2 +- programs/rodeos/streams/logger.hpp | 2 +- programs/rodeos/streams/rabbitmq.hpp | 8 +- programs/rodeos/streams/stream.hpp | 2 +- programs/rodeos/wasm_ql_http.cpp | 14 +- unittests/api_tests.cpp | 2 +- unittests/db_to_kv_tests.cpp | 8 +- unittests/misc_tests.cpp | 2 +- unittests/whitelist_blacklist_tests.cpp | 4 +- 69 files changed, 792 insertions(+), 570 deletions(-) diff --git a/libraries/amqp/include/eosio/amqp/amqp_handler.hpp b/libraries/amqp/include/eosio/amqp/amqp_handler.hpp index cbb903b7ba0..c3b8fa0b7b4 100644 --- a/libraries/amqp/include/eosio/amqp/amqp_handler.hpp +++ b/libraries/amqp/include/eosio/amqp/amqp_handler.hpp @@ -42,7 +42,7 @@ class amqp_handler { [this](AMQP::Channel* c){channel_ready(c);}, [this](){channel_failed();} ) , on_error_( std::move( on_err ) ) { - ilog( "Connecting to AMQP address ${a} ...", ("a", amqp_connection_.address()) ); + ilog( "Connecting to AMQP address {a} ...", ("a", amqp_connection_.address()) ); wait(); } @@ -65,19 +65,19 @@ class amqp_handler { boost::asio::post( thread_pool_.get_executor(),[this, &cond, en=exchange_name, type]() { try { if( !channel_ ) { - elog( "AMQP not connected to channel ${a}", ("a", amqp_connection_.address()) ); + elog( "AMQP not connected to channel 
{a}", ("a", amqp_connection_.address()) ); on_error( "AMQP not connected to channel" ); return; } auto& exchange = channel_->declareExchange( en, type, AMQP::durable); exchange.onSuccess( [this, &cond, en]() { - dlog( "AMQP declare exchange successful, exchange ${e}, for ${a}", + dlog( "AMQP declare exchange successful, exchange {e}, for {a}", ("e", en)("a", amqp_connection_.address()) ); cond.set(); } ); exchange.onError([this, &cond, en](const char* error_message) { - elog( "AMQP unable to declare exchange ${e}, for ${a}", ("e", en)("a", amqp_connection_.address()) ); + elog( "AMQP unable to declare exchange {e}, for {a}", ("e", en)("a", amqp_connection_.address()) ); on_error( std::string("AMQP Queue error: ") + error_message ); cond.set(); }); @@ -87,7 +87,7 @@ class amqp_handler { } ); if( !cond.wait() ) { - elog( "AMQP timeout declaring exchange: ${q} for ${a}", ("q", exchange_name)("a", amqp_connection_.address()) ); + elog( "AMQP timeout declaring exchange: {q} for {a}", ("q", exchange_name)("a", amqp_connection_.address()) ); on_error( "AMQP timeout declaring exchange: " + exchange_name ); } } @@ -99,7 +99,7 @@ class amqp_handler { boost::asio::post( thread_pool_.get_executor(), [this, &cond, qn=queue_name]() mutable { try { if( !channel_ ) { - elog( "AMQP not connected to channel ${a}", ("a", amqp_connection_.address()) ); + elog( "AMQP not connected to channel {a}", ("a", amqp_connection_.address()) ); on_error( "AMQP not connected to channel" ); return; } @@ -107,12 +107,12 @@ class amqp_handler { auto& queue = channel_->declareQueue( qn, AMQP::durable ); queue.onSuccess( [this, &cond]( const std::string& name, uint32_t message_count, uint32_t consumer_count ) { - dlog( "AMQP queue ${q}, messages: ${mc}, consumers: ${cc}, for ${a}", + dlog( "AMQP queue {q}, messages: {mc}, consumers: {cc}, for {a}", ("q", name)("mc", message_count)("cc", consumer_count)("a", amqp_connection_.address()) ); cond.set(); } ); queue.onError( [this, &cond, qn]( const char* 
error_message ) { - elog( "AMQP error declaring queue ${q} for ${a}", ("q", qn)("a", amqp_connection_.address()) ); + elog( "AMQP error declaring queue {q} for {a}", ("q", qn)("a", amqp_connection_.address()) ); on_error( error_message ); cond.set(); } ); @@ -122,7 +122,7 @@ class amqp_handler { } ); if( !cond.wait() ) { - elog( "AMQP timeout declaring queue: ${q} for ${a}", ("q", queue_name)("a", amqp_connection_.address()) ); + elog( "AMQP timeout declaring queue: {q} for {a}", ("q", queue_name)("a", amqp_connection_.address()) ); on_error( "AMQP timeout declaring queue: " + queue_name ); } } @@ -140,7 +140,7 @@ class amqp_handler { cid=std::move(correlation_id), rt=std::move(reply_to), buf=std::move(buf)]() mutable { try { if( !my->channel_ ) { - elog( "AMQP not connected to channel ${a}", ("a", my->amqp_connection_.address()) ); + elog( "AMQP not connected to channel {a}", ("a", my->amqp_connection_.address()) ); my->on_error( "AMQP not connected to channel" ); return; } @@ -162,7 +162,7 @@ class amqp_handler { cid=std::move(correlation_id), rt=std::move(reply_to), f=std::move(f)]() mutable { try { if( !my->channel_ ) { - elog( "AMQP not connected to channel ${a}", ("a", my->amqp_connection_.address()) ); + elog( "AMQP not connected to channel {a}", ("a", my->amqp_connection_.address()) ); my->on_error( "AMQP not connected to channel" ); return; } @@ -281,14 +281,14 @@ class amqp_handler { // called from non-amqp thread void wait() { if( !first_connect_.wait() ) { - elog( "AMQP timeout connecting to: ${a}", ("a", amqp_connection_.address()) ); + elog( "AMQP timeout connecting to: {a}", ("a", amqp_connection_.address()) ); on_error( "AMQP timeout connecting" ); } } // called from amqp thread void channel_ready(AMQP::Channel* c) { - ilog( "AMQP Channel ready: ${id}, for ${a}", ("id", c ? c->id() : 0)("a", amqp_connection_.address()) ); + ilog( "AMQP Channel ready: {id}, for {a}", ("id", c ? 
c->id() : 0)("a", amqp_connection_.address()) ); channel_ = c; boost::system::error_code ec; timer_.cancel(ec); @@ -305,7 +305,7 @@ class amqp_handler { // called from amqp thread void channel_failed() { - wlog( "AMQP connection failed to: ${a}", ("a", amqp_connection_.address()) ); + wlog( "AMQP connection failed to: {a}", ("a", amqp_connection_.address()) ); channel_ = nullptr; // connection will automatically be retried by single_channel_retrying_amqp_connection @@ -329,19 +329,19 @@ class amqp_handler { channel_->recover(AMQP::requeue) .onSuccess( [&]() { dlog( "successfully started channel recovery" ); } ) .onError( [&]( const char* message ) { - elog( "channel recovery failed ${e}", ("e", message) ); + elog( "channel recovery failed {e}", ("e", message) ); on_error( "AMQP channel recovery failed" ); } ); } auto& consumer = channel_->consume(queue_name_); consumer.onSuccess([&](const std::string& consumer_tag) { - ilog("consume started, queue: ${q}, tag: ${tag}, for ${a}", + ilog("consume started, queue: {q}, tag: {tag}, for {a}", ("q", queue_name_)("tag", consumer_tag)("a", amqp_connection_.address())); consumer_tag_ = consumer_tag; }); consumer.onError([&](const char* message) { - elog("consume failed, queue ${q}, tag: ${t} error: ${e}, for ${a}", + elog("consume failed, queue {q}, tag: {t} error: {e}, for {a}", ("q", queue_name_)("t", consumer_tag_)("e", message)("a", amqp_connection_.address())); consumer_tag_.clear(); }); @@ -355,21 +355,21 @@ class amqp_handler { if( channel_ && on_consume_ && !consumer_tag_.empty() ) { auto& consumer = channel_->cancel(consumer_tag_); consumer.onSuccess([&, cb{std::move(on_cancel)}](const std::string& consumer_tag) { - ilog("consume stopped, queue: ${q}, tag: ${tag}, for ${a}", + ilog("consume stopped, queue: {q}, tag: {tag}, for {a}", ("q", queue_name_)("tag", consumer_tag)("a", amqp_connection_.address())); consumer_tag_.clear(); on_consume_ = nullptr; if( cb ) cb(consumer_tag); }); consumer.onError([&](const char* 
message) { - elog("cancel consume failed, queue ${q}, tag: ${t} error: ${e}, for ${a}", + elog("cancel consume failed, queue {q}, tag: {t} error: {e}, for {a}", ("q", queue_name_)("t", consumer_tag_)("e", message)("a", amqp_connection_.address())); consumer_tag_.clear(); on_consume_ = nullptr; on_error(message); }); } else { - wlog("Unable to stop consuming from queue: ${q}, tag: ${t}", ("q", queue_name_)("t", consumer_tag_)); + wlog("Unable to stop consuming from queue: {q}, tag: {t}", ("q", queue_name_)("t", consumer_tag_)); } } diff --git a/libraries/amqp/reliable_amqp_publisher.cpp b/libraries/amqp/reliable_amqp_publisher.cpp index 3102857d1da..e26dd5d75f5 100644 --- a/libraries/amqp/reliable_amqp_publisher.cpp +++ b/libraries/amqp/reliable_amqp_publisher.cpp @@ -84,7 +84,7 @@ reliable_amqp_publisher_impl::reliable_amqp_publisher_impl(const std::string& ur fc::raw::unpack(file, message_deque); if( !message_deque.empty() ) batch_num = message_deque.back().num; - ilog("AMQP existing persistent file ${f} loaded with ${c} unconfirmed messages for ${a} publishing to \"${e}\".", + ilog("AMQP existing persistent file {f} loaded with {c} unconfirmed messages for {a} publishing to \"{e}\".", ("f", data_file_path.generic_string())("c",message_deque.size())("a", retrying_connection.address())("e", exchange)); } FC_RETHROW_EXCEPTIONS(error, "Failed to load previously unconfirmed AMQP messages from ${f}", ("f", (fc::path)data_file_path)); } @@ -191,7 +191,7 @@ void reliable_amqp_publisher_impl::verify_max_queue_size() { constexpr unsigned max_queued_messages = 1u << 20u; if(message_deque.size() > max_queued_messages) { - elog("AMQP connection ${a} publishing to \"${e}\" has reached ${max} unconfirmed messages", + elog("AMQP connection {a} publishing to \"{e}\" has reached {max} unconfirmed messages", ("a", retrying_connection.address())("e", exchange)("max", max_queued_messages)); std::string err = "AMQP publishing to " + exchange + " has reached " + 
std::to_string(message_deque.size()) + " unconfirmed messages"; if( on_fatal_error) on_fatal_error(err); diff --git a/libraries/amqp/retrying_amqp_connection.cpp b/libraries/amqp/retrying_amqp_connection.cpp index 7525c033caf..d36ce6e4c6a 100644 --- a/libraries/amqp/retrying_amqp_connection.cpp +++ b/libraries/amqp/retrying_amqp_connection.cpp @@ -20,7 +20,7 @@ struct retrying_amqp_connection::impl : public AMQP::ConnectionHandler { } void onReady(AMQP::Connection* connection) override { - fc_ilog(_logger, "AMQP connection to ${s} is fully operational", ("s", _address)); + fc_ilog(_logger, "AMQP connection to {s} is fully operational", ("s", _address)); _ready_callback(connection); _indicated_ready = true; @@ -34,12 +34,12 @@ struct retrying_amqp_connection::impl : public AMQP::ConnectionHandler { } void onError(AMQP::Connection* connection, const char* message) override { - fc_elog(_logger, "AMQP connection to ${s} suffered an error; will retry shortly: ${m}", ("s", _address)("m", message)); + fc_elog(_logger, "AMQP connection to {s} suffered an error; will retry shortly: {m}", ("s", _address)("m", message)); schedule_retry(); } void onClosed(AMQP::Connection *connection) override { - fc_wlog(_logger, "AMQP connection to ${s} closed AMQP connection", ("s", _address)); + fc_wlog(_logger, "AMQP connection to {s} closed AMQP connection", ("s", _address)); schedule_retry(); } @@ -47,7 +47,7 @@ struct retrying_amqp_connection::impl : public AMQP::ConnectionHandler { _resolver.async_resolve(_address.hostname(), std::to_string(_address.port()), boost::asio::bind_executor(_strand, [this](const auto ec, const auto endpoints) { if(ec) { if(ec != boost::asio::error::operation_aborted) { - fc_wlog(_logger, "Failed resolving AMQP server ${s}; will retry shortly: ${m}", ("s", _address)("m", ec.message())); + fc_wlog(_logger, "Failed resolving AMQP server {s}; will retry shortly: {m}", ("s", _address)("m", ec.message())); schedule_retry(); } return; @@ -58,12 +58,12 @@ struct 
retrying_amqp_connection::impl : public AMQP::ConnectionHandler { boost::asio::async_connect(_sock, endpoints, boost::asio::bind_executor(_strand, [this](const auto ec, const auto endpoint) { if(ec) { if(ec != boost::asio::error::operation_aborted) { - fc_wlog(_logger, "Failed connecting AMQP server ${s}; will retry shortly: ${m}", ("s", _address)("m", ec.message())); + fc_wlog(_logger, "Failed connecting AMQP server {s}; will retry shortly: {m}", ("s", _address)("m", ec.message())); schedule_retry(); } return; } - fc_ilog(_logger, "TCP connection to AMQP server at ${s} is up", ("s", _address)); + fc_ilog(_logger, "TCP connection to AMQP server at {s} is up", ("s", _address)); receive_some(); _state->amqp_connection.emplace(this, _address.login(), _address.vhost()); })); @@ -109,7 +109,7 @@ struct retrying_amqp_connection::impl : public AMQP::ConnectionHandler { boost::asio::async_write(_sock, boost::asio::buffer(_state->outgoing_queue.front()), boost::asio::bind_executor(_strand, [this](const auto& ec, size_t wrote) { if(ec) { if(ec != boost::asio::error::operation_aborted) { - fc_wlog(_logger, "Failed writing to AMQP server ${s}; connection will retry shortly: ${m}", ("s", _address)("m", ec.message())); + fc_wlog(_logger, "Failed writing to AMQP server {s}; connection will retry shortly: {m}", ("s", _address)("m", ec.message())); schedule_retry(); } return; @@ -124,7 +124,7 @@ struct retrying_amqp_connection::impl : public AMQP::ConnectionHandler { _sock.async_read_some(boost::asio::buffer(_read_buff), boost::asio::bind_executor(_strand, [this](const auto& ec, size_t sz) { if(ec) { if(ec != boost::asio::error::operation_aborted) { - fc_wlog(_logger, "Failed reading from AMQP server ${s}; connection will retry shortly: ${m}", ("s", _address)("m", ec.message())); + fc_wlog(_logger, "Failed reading from AMQP server {s}; connection will retry shortly: {m}", ("s", _address)("m", ec.message())); schedule_retry(); } return; @@ -213,11 +213,11 @@ struct 
single_channel_retrying_amqp_connection::impl { _amqp_channel.emplace(_amqp_connection); } catch(...) { - fc_wlog(_logger, "AMQP channel could not start for AMQP connection ${c}; retrying", ("c", _connection.address())); + fc_wlog(_logger, "AMQP channel could not start for AMQP connection {c}; retrying", ("c", _connection.address())); start_retry(); } _amqp_channel->onError([this](const char* e) { - fc_wlog(_logger, "AMQP channel failure on AMQP connection ${c}; retrying : ${m}", ("c", _connection.address())("m", e)); + fc_wlog(_logger, "AMQP channel failure on AMQP connection {c}; retrying : {m}", ("c", _connection.address())("m", e)); _failed(); start_retry(); }); diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 38d216843fd..7b8d5067b6a 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -289,7 +289,7 @@ void apply_context::require_recipient( account_name recipient ) { ); if (auto dm_logger = control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "CREATION_OP NOTIFY ${action_id}", + fc_dlog(*dm_logger, "CREATION_OP NOTIFY {action_id}", ("action_id", get_action_id()) ); } @@ -395,7 +395,7 @@ void apply_context::execute_inline( action&& a ) { ); if (auto dm_logger = control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "CREATION_OP INLINE ${action_id}", + fc_dlog(*dm_logger, "CREATION_OP INLINE {action_id}", ("action_id", get_action_id()) ); } @@ -422,7 +422,7 @@ void apply_context::execute_context_free_inline( action&& a ) { ); if (auto dm_logger = control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "CREATION_OP CFA_INLINE ${action_id}", + fc_dlog(*dm_logger, "CREATION_OP CFA_INLINE {action_id}", ("action_id", get_action_id()) ); } @@ -583,7 +583,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a } if (auto dm_logger = control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "DTRX_OP MODIFY_CANCEL ${action_id} ${sender} ${sender_id} ${payer} 
${published} ${delay} ${expiration} ${trx_id} ${trx}", + fc_dlog(*dm_logger, "DTRX_OP MODIFY_CANCEL {action_id} {sender} {sender_id} {payer} {published} {delay} {expiration} {trx_id} {trx}", ("action_id", get_action_id()) ("sender", receiver.to_string()) ("sender_id", sender_id) @@ -612,9 +612,9 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if (auto dm_logger = control.get_deep_mind_logger()) { operation = "update"; - event_id = STORAGE_EVENT_ID("${id}", ("id", gtx.id)); + event_id = STORAGE_EVENT_ID("{id}", ("id", gtx.id)); - fc_dlog(*dm_logger, "DTRX_OP MODIFY_CREATE ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", + fc_dlog(*dm_logger, "DTRX_OP MODIFY_CREATE {action_id} {sender} {sender_id} {payer} {published} {delay} {expiration} {trx_id} {trx}", ("action_id", get_action_id()) ("sender", receiver.to_string()) ("sender_id", sender_id) @@ -641,9 +641,9 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if (auto dm_logger = control.get_deep_mind_logger()) { operation = "add"; - event_id = STORAGE_EVENT_ID("${id}", ("id", gtx.id)); + event_id = STORAGE_EVENT_ID("{id}", ("id", gtx.id)); - fc_dlog(*dm_logger, "DTRX_OP CREATE ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", + fc_dlog(*dm_logger, "DTRX_OP CREATE {action_id} {sender} {sender_id} {payer} {published} {delay} {expiration} {trx_id} {trx}", ("action_id", get_action_id()) ("sender", receiver.to_string()) ("sender_id", sender_id) @@ -677,7 +677,7 @@ bool apply_context::cancel_deferred_transaction( const uint128_t& sender_id, acc if (auto dm_logger = control.get_deep_mind_logger()) { event_id = STORAGE_EVENT_ID("${id}", ("id", gto->id)); - fc_dlog(*dm_logger, "DTRX_OP CANCEL ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", + fc_dlog(*dm_logger, "DTRX_OP CANCEL {action_id} {sender} 
{sender_id} {payer} {published} {delay} {expiration} {trx_id} {trx}", ("action_id", get_action_id()) ("sender", receiver.to_string()) ("sender_id", sender_id) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index c8bdefb618f..a29e75458d0 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -159,7 +159,7 @@ namespace eosio { namespace chain { p.auth = auth; if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", + fc_dlog(*dm_logger, "PERM_OP INS {action_id} {permission_id} {data}", ("action_id", action_id) ("permission_id", p.id) ("data", p) @@ -199,7 +199,7 @@ namespace eosio { namespace chain { p.auth = std::move(auth); if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP INS ${action_id} ${permission_id} ${data}", + fc_dlog(*dm_logger, "PERM_OP INS {action_id} {permission_id} {data}", ("action_id", action_id) ("permission_id", p.id) ("data", p) @@ -223,7 +223,7 @@ namespace eosio { namespace chain { po.last_updated = _control.pending_block_time(); if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP UPD ${action_id} ${permission_id} ${old} ${new}", + fc_dlog(*dm_logger, "PERM_OP UPD {action_id} {permission_id} {old} {new}", ("action_id", action_id) ("permission_id", po.id) ("old", old_permission) @@ -242,7 +242,7 @@ namespace eosio { namespace chain { _db.get_mutable_index().remove_object( permission.usage_id._id ); if (auto dm_logger = _control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "PERM_OP REM ${action_id} ${permission_id} ${data}", + fc_dlog(*dm_logger, "PERM_OP REM {action_id} {permission_id} {data}", ("action_id", action_id) ("permission_id", permission.id) ("data", permission) diff --git a/libraries/chain/backing_store/db_context.cpp b/libraries/chain/backing_store/db_context.cpp index 
8aa7ec64b8d..d405d4c1493 100644 --- a/libraries/chain/backing_store/db_context.cpp +++ b/libraries/chain/backing_store/db_context.cpp @@ -22,7 +22,7 @@ std::string db_context::table_event(name code, name scope, name table, name qual } void db_context::log_insert_table(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name payer) { - fc_dlog(deep_mind_logger, "TBL_OP INS ${action_id} ${code} ${scope} ${table} ${payer}", + fc_dlog(deep_mind_logger, "TBL_OP INS {action_id} {code} {scope} {table} {payer}", ("action_id", action_id) ("code", code.to_string()) ("scope", scope.to_string()) @@ -32,7 +32,7 @@ void db_context::log_insert_table(fc::logger& deep_mind_logger, uint32_t action_ } void db_context::log_remove_table(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name payer) { - fc_dlog(deep_mind_logger, "TBL_OP REM ${action_id} ${code} ${scope} ${table} ${payer}", + fc_dlog(deep_mind_logger, "TBL_OP REM {action_id} {code} {scope} {table} {payer}", ("action_id", action_id) ("code", code.to_string()) ("scope", scope.to_string()) @@ -43,7 +43,7 @@ void db_context::log_remove_table(fc::logger& deep_mind_logger, uint32_t action_ void db_context::log_row_insert(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name payer, account_name primkey, const char* buffer, size_t buffer_size) { - fc_dlog(deep_mind_logger, "DB_OP INS ${action_id} ${payer} ${table_code} ${scope} ${table_name} ${primkey} ${ndata}", + fc_dlog(deep_mind_logger, "DB_OP INS {action_id} {payer} {table_code} {scope} {table_name} {primkey} {ndata}", ("action_id", action_id) ("payer", payer.to_string()) ("table_code", code.to_string()) @@ -57,7 +57,7 @@ void db_context::log_row_insert(fc::logger& deep_mind_logger, uint32_t action_id void db_context::log_row_update(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name old_payer, 
account_name new_payer, account_name primkey, const char* old_buffer, size_t old_buffer_size, const char* new_buffer, size_t new_buffer_size) { - fc_dlog(deep_mind_logger, "DB_OP UPD ${action_id} ${opayer}:${npayer} ${table_code} ${scope} ${table_name} ${primkey} ${odata}:${ndata}", + fc_dlog(deep_mind_logger, "DB_OP UPD {action_id} {opayer}:{npayer} {table_code} {scope} {table_name} {primkey} {odata}:{ndata}", ("action_id", action_id) ("opayer", old_payer.to_string()) ("npayer", new_payer.to_string()) @@ -72,7 +72,7 @@ void db_context::log_row_update(fc::logger& deep_mind_logger, uint32_t action_id void db_context::log_row_remove(fc::logger& deep_mind_logger, uint32_t action_id, name code, name scope, name table, account_name payer, account_name primkey, const char* buffer, size_t buffer_size) { - fc_dlog(deep_mind_logger, "DB_OP REM ${action_id} ${payer} ${table_code} ${scope} ${table_name} ${primkey} ${odata}", + fc_dlog(deep_mind_logger, "DB_OP REM {action_id} {payer} {table_code} {scope} {table_name} {primkey} {odata}", ("action_id", action_id) ("payer", payer.to_string()) ("table_code", code.to_string()) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 7caa3bf2d71..80e1c92562d 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -367,14 +367,14 @@ namespace eosio { namespace chain { auto block_num = block_header::num_from_id(id); if (block_num != previous_block_num + 1) { - elog( "Block ${num} (${id}) skips blocks. Previous block in block log is block ${prev_num} (${previous})", + elog( "Block {num} ({id}) skips blocks. Previous block in block log is block {prev_num} ({previous})", ("num", block_num)("id", id) ("prev_num", previous_block_num)("previous", previous_block_id) ); } if (previous_block_id != block_id_type() && previous_block_id != header.previous) { - elog("Block ${num} (${id}) does not link back to previous block. " - "Expected previous: ${expected}. 
Actual previous: ${actual}.", + elog("Block {num} ({id}) does not link back to previous block. " + "Expected previous: {expected}. Actual previous: {actual}.", ("num", block_num)("id", id)("expected", previous_block_id)("actual", header.previous)); } @@ -460,17 +460,17 @@ namespace eosio { namespace chain { void block_log_data::construct_index(const fc::path& index_file_path) { std::string index_file_name = index_file_path.generic_string(); - ilog("Will write new blocks.index file ${file}", ("file", index_file_name)); + ilog("Will write new blocks.index file {file}", ("file", index_file_name)); const uint32_t num_blocks = this->num_blocks(); - ilog("block log version= ${version}", ("version", this->version())); + ilog("block log version= {version}", ("version", this->version())); if (num_blocks == 0) { return; } - ilog("first block= ${first} last block= ${last}", + ilog("first block= {first} last block= {last}", ("first", this->first_block_num())("last", (this->last_block_num()))); index_writer index(index_file_path, num_blocks); @@ -615,7 +615,7 @@ namespace eosio { namespace chain { future_version = preamble.version; EOS_ASSERT(catalog.verifier.chain_id.empty() || catalog.verifier.chain_id == preamble.chain_id(), block_log_exception, - "block log file ${path} has a different chain id", ("path", block_file.get_file_path())); + "block log file {path} has a different chain id", ("path", block_file.get_file_path())); genesis_written_to_block_log = true; // Assume it was constructed properly. 
@@ -799,7 +799,7 @@ namespace eosio { namespace chain { void block_log::reset(const chain_id_type& chain_id, uint32_t first_block_num) { EOS_ASSERT(first_block_num > 1, block_log_exception, - "Block log version ${ver} needs to be created with a genesis state if starting from block number 1."); + "Block log version {ver} needs to be created with a genesis state if starting from block number 1."); EOS_ASSERT(my->catalog.verifier.chain_id.empty() || chain_id == my->catalog.verifier.chain_id, block_log_exception, "Trying to reset to the chain to a different chain id"); @@ -874,8 +874,8 @@ namespace eosio { namespace chain { void block_log::construct_index(const fc::path& block_file_name, const fc::path& index_file_name) { - ilog("Will read existing blocks.log file ${file}", ("file", block_file_name.generic_string())); - ilog("Will write new blocks.index file ${file}", ("file", index_file_name.generic_string())); + ilog("Will read existing blocks.log file {file}", ("file", block_file_name.generic_string())); + ilog("Will write new blocks.index file {file}", ("file", index_file_name.generic_string())); block_log_data log_data(block_file_name); log_data.construct_index(index_file_name); @@ -888,8 +888,8 @@ namespace eosio { namespace chain { tail.open(fc::cfile::create_or_update_rw_mode); tail.write(start, size); - ilog("Data at tail end of block log which should contain the (incomplete) serialization of block ${num} " - "has been written out to '${tail_path}'.", + ilog("Data at tail end of block log which should contain the (incomplete) serialization of block {num} " + "has been written out to '{tail_path}'.", ("num", block_num + 1)("tail_path", tail_path.string())); } @@ -956,12 +956,12 @@ namespace eosio { namespace chain { if (strlen(reversible_block_dir_name) && fc::is_directory(blocks_dir/reversible_block_dir_name)) { fc::rename(blocks_dir/ reversible_block_dir_name, backup_dir/ reversible_block_dir_name); } - ilog("Moved existing blocks directory to backup 
location: '${new_blocks_dir}'", ("new_blocks_dir", backup_dir.string())); + ilog("Moved existing blocks directory to backup location: '{new_blocks_dir}'", ("new_blocks_dir", backup_dir.string())); const auto block_log_path = blocks_dir / "blocks.log"; const auto block_file_name = block_log_path.generic_string(); - ilog("Reconstructing '${new_block_log}' from backed up block log", ("new_block_log", block_file_name)); + ilog("Reconstructing '{new_block_log}' from backed up block log", ("new_block_log", block_file_name)); block_log_data log_data; auto ds = log_data.open(backup_dir / "blocks.log"); @@ -980,7 +980,7 @@ namespace eosio { namespace chain { while (ds.remaining() > 0 && block_num < truncate_at_block) { std::tie(block_num, block_id) = block_log_data::full_validate_block_entry(ds, block_num, block_id, entry); if (block_num % 1000 == 0) - ilog("Verified block ${num}", ("num", block_num)); + ilog("Verified block {num}", ("num", block_num)); pos = ds.tellp(); } } @@ -1002,13 +1002,13 @@ namespace eosio { namespace chain { new_block_file.write(log_data.data(), pos); if (error_msg.size()) { - ilog("Recovered only up to block number ${num}. " - "The block ${next_num} could not be deserialized from the block log due to error:\n${error_msg}", + ilog("Recovered only up to block number {num}. " + "The block {next_num} could not be deserialized from the block log due to error:\n{error_msg}", ("num", block_num)("next_num", block_num + 1)("error_msg", error_msg)); } else if (block_num == truncate_at_block && pos < log_data.size()) { - ilog("Stopped recovery of block log early at specified block number: ${stop}.", ("stop", truncate_at_block)); + ilog("Stopped recovery of block log early at specified block number: {stop}.", ("stop", truncate_at_block)); } else { - ilog("Existing block log was undamaged. Recovered all irreversible blocks up to block number ${num}.", + ilog("Existing block log was undamaged. 
Recovered all irreversible blocks up to block number {num}.", ("num", block_num)); } return backup_dir; @@ -1092,17 +1092,17 @@ namespace eosio { namespace chain { bool block_log::trim_blocklog_front(const fc::path& block_dir, const fc::path& temp_dir, uint32_t truncate_at_block) { EOS_ASSERT( block_dir != temp_dir, block_log_exception, "block_dir and temp_dir need to be different directories" ); - ilog("In directory ${dir} will trim all blocks before block ${n} from blocks.log and blocks.index.", + ilog("In directory {dir} will trim all blocks before block {n} from blocks.log and blocks.index.", ("dir", block_dir.generic_string())("n", truncate_at_block)); block_log_bundle log_bundle(block_dir); if (truncate_at_block <= log_bundle.log_data.first_block_num()) { - dlog("There are no blocks before block ${n} so do nothing.", ("n", truncate_at_block)); + dlog("There are no blocks before block {n} so do nothing.", ("n", truncate_at_block)); return false; } if (truncate_at_block > log_bundle.log_data.last_block_num()) { - dlog("All blocks are before block ${n} so do nothing (trim front would delete entire blocks.log).", ("n", truncate_at_block)); + dlog("All blocks are before block {n} so do nothing (trim front would delete entire blocks.log).", ("n", truncate_at_block)); return false; } @@ -1160,15 +1160,15 @@ namespace eosio { namespace chain { block_log_bundle log_bundle(block_dir); - ilog("In directory ${block_dir} will trim all blocks after block ${n} from ${block_file} and ${index_file}", + ilog("In directory {block_dir} will trim all blocks after block {n} from {block_file} and {index_file}", ("block_dir", block_dir.generic_string())("n", n)("block_file",log_bundle.block_file_name.generic_string())("index_file", log_bundle.index_file_name.generic_string())); if (n < log_bundle.log_data.first_block_num()) { - dlog("All blocks are after block ${n} so do nothing (trim_end would delete entire blocks.log)",("n", n)); + dlog("All blocks are after block {n} so do 
nothing (trim_end would delete entire blocks.log)",("n", n)); return 1; } if (n > log_bundle.log_data.last_block_num()) { - dlog("There are no blocks after block ${n} so do nothing",("n", n)); + dlog("There are no blocks after block {n} so do nothing",("n", n)); return 2; } @@ -1178,7 +1178,7 @@ namespace eosio { namespace chain { boost::filesystem::resize_file(log_bundle.block_file_name, to_trim_block_position); boost::filesystem::resize_file(log_bundle.index_file_name, index_file_size); - ilog("blocks.index has been trimmed to ${index_file_size} bytes", ("index_file_size", index_file_size)); + ilog("blocks.index has been trimmed to {index_file_size} bytes", ("index_file_size", index_file_size)); return 0; } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 3395c26c9ac..9ec06b4fd9e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -307,18 +307,18 @@ struct controller_impl { try { s( std::forward( a )); } catch (std::bad_alloc& e) { - wlog( "std::bad_alloc: ${w}", ("w", e.what()) ); + wlog( "std::bad_alloc: {w}", ("w", e.what()) ); throw e; } catch (boost::interprocess::bad_alloc& e) { - wlog( "boost::interprocess::bad alloc: ${w}", ("w", e.what()) ); + wlog( "boost::interprocess::bad alloc: {w}", ("w", e.what()) ); throw e; } catch ( controller_emit_signal_exception& e ) { - wlog( "controller_emit_signal_exception: ${details}", ("details", e.to_detail_string()) ); + wlog( "controller_emit_signal_exception: {details}", ("details", e.to_detail_string()) ); throw e; } catch ( fc::exception& e ) { - wlog( "fc::exception: ${details}", ("details", e.to_detail_string()) ); + wlog( "fc::exception: {details}", ("details", e.to_detail_string()) ); } catch ( std::exception& e ) { - wlog( "std::exception: ${details}", ("details", e.what()) ); + wlog( "std::exception: {details}", ("details", e.what()) ); } catch ( ... 
) { wlog( "signal handler threw exception" ); } @@ -430,7 +430,7 @@ struct controller_impl { std::exception_ptr except_ptr; if( blog_head && start_block_num <= blog_head->block_num() ) { - ilog( "existing block log, attempting to replay from ${s} to ${n} blocks", + ilog( "existing block log, attempting to replay from {s} to {n} blocks", ("s", start_block_num)("n", blog_head->block_num()) ); try { while( std::unique_ptr next = blog.read_signed_block_by_num( head->block_num + 1 ) ) { @@ -438,25 +438,25 @@ struct controller_impl { replay_push_block( std::move(next), controller::block_status::irreversible ); if( check_shutdown() ) break; if( block_num % 500 == 0 ) { - ilog( "${n} of ${head}", ("n", block_num)("head", blog_head->block_num()) ); + ilog( "{n} of {head}", ("n", block_num)("head", blog_head->block_num()) ); } } } catch( const database_guard_exception& e ) { except_ptr = std::current_exception(); } - ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) ); + ilog( "{n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) ); auto pending_head = fork_db.pending_head(); if( pending_head ) { - ilog( "fork database head ${h}, root ${r}", ("h", pending_head->block_num)( "r", fork_db.root()->block_num ) ); + ilog( "fork database head {h}, root {r}", ("h", pending_head->block_num)( "r", fork_db.root()->block_num ) ); if( pending_head->block_num < head->block_num || head->block_num < fork_db.root()->block_num ) { - ilog( "resetting fork database with new last irreversible block as the new root: ${id}", ("id", head->id) ); + ilog( "resetting fork database with new last irreversible block as the new root: {id}", ("id", head->id) ); fork_db.reset( *head ); } else if( head->block_num != fork_db.root()->block_num ) { auto new_root = fork_db.search_on_branch( pending_head->id, head->block_num ); EOS_ASSERT( new_root, fork_database_exception, "unexpected error: could not find new LIB in fork database" ); - ilog( 
"advancing fork database root to new last irreversible block within existing fork database: ${id}", + ilog( "advancing fork database root to new last irreversible block within existing fork database: {id}", ("id", new_root->id) ); fork_db.mark_valid( new_root ); fork_db.advance_root( new_root->id ); @@ -481,7 +481,7 @@ struct controller_impl { ++rev; replay_push_block( (*i)->block, controller::block_status::validated ); } - ilog( "${n} reversible blocks replayed", ("n",rev) ); + ilog( "{n} reversible blocks replayed", ("n",rev) ); } if( !fork_db.head() ) { @@ -489,7 +489,7 @@ struct controller_impl { } auto end = fc::time_point::now(); - ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", + ilog( "replayed {n} blocks in {duration} seconds, {mspb} ms/block", ("n", head->block_num + 1 - start_block_num)("duration", (end-start).count()/1000000) ("mspb", ((end-start).count()/1000.0)/(head->block_num-start_block_num)) ); replay_head_time.reset(); @@ -522,7 +522,7 @@ struct controller_impl { blog.reset( chain_id, lib_num + 1 ); } const auto hash = calculate_integrity_hash(); - ilog( "database initialized with hash: ${hash}", ("hash", hash) ); + ilog( "database initialized with hash: {hash}", ("hash", hash) ); init(check_shutdown, true); } catch (boost::interprocess::bad_alloc& e) { @@ -645,7 +645,7 @@ struct controller_impl { ("db",db.revision())("head",head->block_num) ); if( db.revision() > head->block_num ) { - wlog( "database revision (${db}) is greater than head block number (${head}), " + wlog( "database revision ({db}) is greater than head block number ({head}), " "attempting to undo pending changes", ("db",db.revision())("head",head->block_num) ); } @@ -659,14 +659,14 @@ struct controller_impl { // FIXME: We should probably feed that from CMake directly somehow ... 
fc_dlog(*dm_logger, "DEEP_MIND_VERSION 13 0"); - fc_dlog(*dm_logger, "ABIDUMP START ${block_num} ${global_sequence_num}", + fc_dlog(*dm_logger, "ABIDUMP START {block_num} {global_sequence_num}", ("block_num", head->block_num) ("global_sequence_num", db.get().global_action_sequence) ); const auto& idx = db.get_index(); for (auto& row : idx.indices()) { if (row.abi.size() != 0) { - fc_dlog(*dm_logger, "ABIDUMP ABI ${contract} ${abi}", + fc_dlog(*dm_logger, "ABIDUMP ABI {contract} {abi}", ("contract", row.name.to_string()) ("abi", std::string_view(row.abi.data())) ); @@ -693,7 +693,7 @@ struct controller_impl { pending_head->id != fork_db.head()->id; pending_head = fork_db.pending_head() ) { - wlog( "applying branch from fork database ending with block: ${id}", ("id", pending_head->id) ); + wlog( "applying branch from fork database ending with block: {id}", ("id", pending_head->id) ); maybe_switch_forks( pending_head, controller::block_status::complete, forked_branch_callback{}, trx_meta_cache_lookup{} ); } } @@ -871,7 +871,7 @@ struct controller_impl { if (auto dm_logger = get_deep_mind_logger()) { auto packed_trx = fc::raw::pack(etrx); - fc_dlog(*dm_logger, "TRX_OP CREATE onerror ${id} ${trx}", + fc_dlog(*dm_logger, "TRX_OP CREATE onerror {id} {trx}", ("id", etrx.id()) ("trx", fc::to_hex(packed_trx)) ); @@ -927,7 +927,7 @@ struct controller_impl { int64_t remove_scheduled_transaction( const generated_transaction_object& gto ) { std::string event_id; if (get_deep_mind_logger() != nullptr) { - event_id = STORAGE_EVENT_ID("${id}", ("id", gto.id)); + event_id = STORAGE_EVENT_ID("{id}", ("id", gto.id)); } int64_t ram_delta = -(config::billable_size_v + gto.packed_trx.size()); @@ -1041,7 +1041,7 @@ struct controller_impl { trace->elapsed = fc::time_point::now() - trx_context.start; if (auto dm_logger = get_deep_mind_logger()) { - fc_dlog(*dm_logger, "DTRX_OP FAILED ${action_id}", + fc_dlog(*dm_logger, "DTRX_OP FAILED {action_id}", ("action_id", 
trx_context.get_action_id()) ); } @@ -1334,7 +1334,7 @@ struct controller_impl { if (auto dm_logger = get_deep_mind_logger()) { // The head block represents the block just before this one that is about to start, so add 1 to get this block num - fc_dlog(*dm_logger, "START_BLOCK ${block_num}", ("block_num", head->block_num + 1)); + fc_dlog(*dm_logger, "START_BLOCK {block_num}", ("block_num", head->block_num + 1)); } emit( self.block_start, head->block_num + 1 ); @@ -1437,7 +1437,7 @@ struct controller_impl { { // Promote proposed schedule to pending schedule. if( !replay_head_time ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ilog( "promoting proposed schedule (set in block {proposed_num}) to pending; current block: {n} lib: {lib} schedule: {schedule} ", ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) ("lib", pbhs.dpos_irreversible_blocknum) ("schedule", producer_authority_schedule::from_shared(gpo.proposed_schedule) ) ); @@ -1537,7 +1537,7 @@ struct controller_impl { create_block_summary( id ); /* - ilog( "finalized block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", + ilog( "finalized block {n} ({id}) at {t} by {p} ({signing_key}); schedule_version: {v} lib: {lib} #dtrxs: {ndtrxs} {np}", ("n",pbhs.block_num) ("id",id) ("t",pbhs.timestamp) @@ -1832,7 +1832,7 @@ struct controller_impl { const auto& b = bsp->block; if( conf.terminate_at_block > 0 && conf.terminate_at_block < self.head_block_num()) { - ilog("Reached configured maximum block ${num}; terminating", ("num", conf.terminate_at_block) ); + ilog("Reached configured maximum block {num}; terminating", ("num", conf.terminate_at_block) ); shutdown(); return bsp; } @@ -1868,7 +1868,7 @@ struct controller_impl { block_validate_exception, "invalid block status for replay" ); if( conf.terminate_at_block > 0 && conf.terminate_at_block < 
self.head_block_num() ) { - ilog("Reached configured maximum block ${num}; terminating", ("num", conf.terminate_at_block) ); + ilog("Reached configured maximum block {num}; terminating", ("num", conf.terminate_at_block) ); shutdown(); return; } @@ -1929,11 +1929,11 @@ struct controller_impl { } } else if( new_head->id != head->id ) { auto old_head = head; - ilog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})", + ilog("switching forks from {current_head_id} (block number {current_head_num}) to {new_head_id} (block number {new_head_num})", ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) ); if (auto dm_logger = get_deep_mind_logger()) { - fc_dlog(*dm_logger, "SWITCH_FORK ${from_id} ${to_id}", + fc_dlog(*dm_logger, "SWITCH_FORK {from_id} {to_id}", ("from_id", head->id) ("to_id", new_head->id) ); @@ -1963,10 +1963,10 @@ struct controller_impl { } catch ( const boost::interprocess::bad_alloc& ) { throw; } catch (const fc::exception& e) { - elog("exception thrown while switching forks ${e}", ("e", e.to_detail_string())); + elog("exception thrown while switching forks {e}", ("e", e.to_detail_string())); except = std::current_exception(); } catch (const std::exception& e) { - elog("exception thrown while switching forks ${e}", ("e", e.what())); + elog("exception thrown while switching forks {e}", ("e", e.what())); except = std::current_exception(); } @@ -1993,7 +1993,7 @@ struct controller_impl { } // end if exception } /// end for each block in branch - ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id)); + ilog("successfully switched fork to new head {new_head_id}", ("new_head_id", new_head->id)); } else { head_changed = false; } @@ -2248,7 +2248,7 @@ struct controller_impl { if (auto dm_logger = get_deep_mind_logger()) { auto packed_trx = fc::raw::pack(trx); - 
fc_dlog(*dm_logger, "TRX_OP CREATE onblock ${id} ${trx}", + fc_dlog(*dm_logger, "TRX_OP CREATE onblock {id} {trx}", ("id", trx.id()) ("trx", fc::to_hex(packed_trx)) ); @@ -2448,7 +2448,7 @@ void controller::preactivate_feature( uint32_t action_id, const digest_type& fea if (auto dm_logger = get_deep_mind_logger()) { const auto feature = pfs.get_protocol_feature(feature_digest); - fc_dlog(*dm_logger, "FEATURE_OP PRE_ACTIVATE ${action_id} ${feature_digest} ${feature}", + fc_dlog(*dm_logger, "FEATURE_OP PRE_ACTIVATE {action_id} {feature_digest} {feature}", ("action_id", action_id) ("feature_digest", feature_digest) ("feature", feature.to_variant().as_string()) @@ -2839,7 +2839,7 @@ int64_t controller::set_proposed_producers( vector producers int64_t version = sch.version; - ilog( "proposed producer schedule with version ${v}", ("v", version) ); + ilog( "proposed producer schedule with version {v}", ("v", version) ); my->db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = cur_block_num; @@ -3128,7 +3128,7 @@ void controller::add_to_ram_correction( account_name account, uint64_t ram_bytes } if (auto dm_logger = get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RAM_CORRECTION_OP ${action_id} ${correction_id} ${event_id} ${payer} ${delta}", + fc_dlog(*dm_logger, "RAM_CORRECTION_OP {action_id} {correction_id} {event_id} {payer} {delta}", ("action_id", action_id) ("correction_id", correction_object_id) ("event_id", event_id) @@ -3225,7 +3225,7 @@ std::optional controller::extract_chain_id_from_db( const path& s } void controller::replace_producer_keys( const public_key_type& key ) { - ilog("Replace producer keys with ${k}", ("k", key.to_string())); + ilog("Replace producer keys with {k}", ("k", key.to_string())); mutable_db().modify( db().get(), [&]( auto& gp ) { gp.proposed_schedule_block_num = {}; gp.proposed_schedule.version = 0; @@ -3235,7 +3235,7 @@ void controller::replace_producer_keys( const public_key_type& key ) { my->head->pending_schedule = {}; 
my->head->pending_schedule.schedule.version = version; for (auto& prod: my->head->active_schedule.producers ) { - ilog("${n}", ("n", prod.producer_name.to_string())); + ilog("{n}", ("n", prod.producer_name.to_string())); std::visit([&](auto &auth) { auth.threshold = 1; auth.keys = {key_weight{key, 1}}; @@ -3282,7 +3282,7 @@ void controller_impl::on_activation(itr->ram_correction); if( itr->ram_correction > static_cast(current_ram_usage) ) { ram_delta = -current_ram_usage; - elog( "account ${name} was to be reduced by ${adjust} bytes of RAM despite only using ${current} bytes of RAM", + elog( "account {name} was to be reduced by {adjust} bytes of RAM despite only using {current} bytes of RAM", ("name", itr->name.to_string())("adjust", itr->ram_correction)("current", current_ram_usage) ); } diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index e14975cb37c..32903d32a0f 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -167,7 +167,7 @@ namespace eosio { namespace chain { if( !my->root ) { if( my->index.size() > 0 ) { - elog( "fork_database is in a bad state when closing; not writing out '$(unknown)'", + elog( "fork_database is in a bad state when closing; not writing out '{filename}'", ("filename", fork_db_dat.generic_string()) ); } return; } @@ -218,7 +218,7 @@ namespace eosio { namespace chain { if( my->head ) { fc::raw::pack( out, my->head->id ); } else { - elog( "head not set in fork database; '$(unknown)' will be corrupted", + elog( "head not set in fork database; '{filename}' will be corrupted", ("filename", fork_db_dat.generic_string()) ); } diff --git a/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp b/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp index b61302d8a4b..5b47866a6ee 100644 --- a/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp +++ 
b/libraries/chain/include/eosio/chain/backing_store/kv_context_chainbase.hpp @@ -170,7 +170,7 @@ namespace eosio { namespace chain { const int64_t resource_delta = erase_table_usage(resource_manager, kv->payer, key, kv->kv_key.size(), kv->kv_value.size()); if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "KV_OP REM ${action_id} ${db} ${payer} ${key} ${odata}", + fc_dlog(*dm_logger, "KV_OP REM {action_id} {db} {payer} {key} {odata}", ("action_id", resource_manager._context->get_action_id()) ("contract", name{ contract }.to_string()) ("payer", kv->payer.to_string()) @@ -196,7 +196,7 @@ namespace eosio { namespace chain { const auto resource_delta = update_table_usage(resource_manager, kv->payer, payer, key, key_size, kv->kv_value.size(), value_size); if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "KV_OP UPD ${action_id} ${db} ${payer} ${key} ${odata}:${ndata}", + fc_dlog(*dm_logger, "KV_OP UPD {action_id} {db} {payer} {key} {odata}:{ndata}", ("action_id", resource_manager._context->get_action_id()) ("contract", name{ contract }.to_string()) ("payer", payer.to_string()) @@ -221,7 +221,7 @@ namespace eosio { namespace chain { }); if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "KV_OP INS ${action_id} ${db} ${payer} ${key} ${ndata}", + fc_dlog(*dm_logger, "KV_OP INS {action_id} {db} {payer} {key} {ndata}", ("action_id", resource_manager._context->get_action_id()) ("contract", name{ contract }.to_string()) ("payer", payer.to_string()) diff --git a/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp b/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp index b1fac50346f..7ee1403d66b 100644 --- a/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp +++ b/libraries/chain/include/eosio/chain/backing_store/kv_context_rocksdb.hpp @@ -363,7 +363,7 @@ 
namespace eosio { namespace chain { const int64_t resource_delta = erase_table_usage(resource_manager, pp->payer, key, key_size, pp->value_size); if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "KV_OP REM ${action_id} ${db} ${payer} ${key} ${odata}", + fc_dlog(*dm_logger, "KV_OP REM {action_id} {db} {payer} {key} {odata}", ("action_id", resource_manager._context->get_action_id()) ("contract", contract_name.to_string()) ("payer", pp->payer.to_string()) @@ -406,7 +406,7 @@ namespace eosio { namespace chain { update_table_usage(resource_manager, old_pp->payer, payer, key, key_size, old_pp->value_size, value_size); if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "KV_OP UPD ${action_id} ${db} ${payer} ${key} ${odata}:${ndata}", + fc_dlog(*dm_logger, "KV_OP UPD {action_id} {db} {payer} {key} {odata}:{ndata}", ("action_id", resource_manager._context->get_action_id()) ("contract", contract_name.to_string()) ("payer", payer.to_string()) @@ -419,7 +419,7 @@ namespace eosio { namespace chain { resource_delta = create_table_usage(resource_manager, payer, key, key_size, value_size); if (auto dm_logger = resource_manager._context->control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "KV_OP INS ${action_id} ${db} ${payer} ${key} ${ndata}", + fc_dlog(*dm_logger, "KV_OP INS {action_id} {db} {payer} {key} {ndata}", ("action_id", resource_manager._context->get_action_id()) ("contract", contract_name.to_string()) ("payer", payer.to_string()) diff --git a/libraries/chain/include/eosio/chain/log_catalog.hpp b/libraries/chain/include/eosio/chain/log_catalog.hpp index 48f17cd156e..2dea4c21700 100644 --- a/libraries/chain/include/eosio/chain/log_catalog.hpp +++ b/libraries/chain/include/eosio/chain/log_catalog.hpp @@ -97,13 +97,13 @@ struct log_catalog { auto existing_itr = collection.find(log.first_block_num()); if (existing_itr != collection.end()) { if (log.last_block_num() 
<= existing_itr->second.last_block_num) { - wlog("${log_path} contains the overlapping range with ${existing_path}.log, dropping ${log_path} " + wlog("{log_path} contains the overlapping range with {existing_path}.log, dropping {log_path} " "from catalog", ("log_path", log_path.string())("existing_path", existing_itr->second.filename_base.string())); return; } else { wlog( - "${log_path} contains the overlapping range with ${existing_path}.log, droping ${existing_path}.log " + "{log_path} contains the overlapping range with {existing_path}.log, dropping {existing_path}.log " "from catelog", ("log_path", log_path.string())("existing_path", existing_itr->second.filename_base.string())); } @@ -188,7 +188,7 @@ struct log_catalog { bfs::rename(old_name, new_name); } else { bfs::remove(old_name); - wlog("${new_name} already exists, just removing ${old_name}", + wlog("{new_name} already exists, just removing {old_name}", ("old_name", old_name.string())("new_name", new_name.string())); } } diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index ce92e1f5e8d..8527c57be70 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -111,6 +111,10 @@ namespace eosio { namespace chain { inline storage_usage_trace generic_storage_usage_trace(uint32_t action_id) { return {action_id}; } + namespace trace { + void to_trimmed_trace_string(string& result, const transaction_trace& t, const controller& chain); + } + } } /// namespace eosio::chain namespace fmt { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 7d1f66b6ffa..166545e3f39 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -720,7 +720,7 @@ Allows privileged contracts to get and set subsets of blockchain parameters. 
); if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "FEATURE_OP ACTIVATE ${feature_digest} ${feature}", + fc_dlog(*dm_logger, "FEATURE_OP ACTIVATE {feature_digest} {feature}", ("feature_digest", feature_digest) ("feature", itr->to_variant().as_string()) ); diff --git a/libraries/chain/resource_limits.cpp b/libraries/chain/resource_limits.cpp index 74e314a202a..0277c7879aa 100644 --- a/libraries/chain/resource_limits.cpp +++ b/libraries/chain/resource_limits.cpp @@ -56,7 +56,7 @@ void resource_limits_manager::initialize_database() { // see default settings in the declaration if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP CONFIG INS ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP CONFIG INS {data}", ("data", config) ); } @@ -70,7 +70,7 @@ void resource_limits_manager::initialize_database() { state.virtual_net_limit = config.net_limit_parameters.max; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP STATE INS ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP STATE INS {data}", ("data", state) ); } @@ -122,7 +122,7 @@ void resource_limits_manager::initialize_account(const account_name& account) { bl.owner = account; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS INS ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS INS {data}", ("data", bl) ); } @@ -132,7 +132,7 @@ void resource_limits_manager::initialize_account(const account_name& account) { bu.owner = account; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE INS ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE INS {data}", ("data", bu) ); } @@ -151,7 +151,7 @@ void resource_limits_manager::set_block_parameters(const elastic_limit_parameter c.net_limit_parameters = net_limit_parameters; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP CONFIG UPD ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP CONFIG UPD 
{data}", ("data", c) ); } @@ -186,7 +186,7 @@ void resource_limits_manager::add_transaction_usage(const flat_set bu.cpu_usage.add( cpu_usage, time_slot, config.account_cpu_usage_average_window ); if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE UPD ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_USAGE UPD {data}", ("data", bu) ); } @@ -257,7 +257,7 @@ void resource_limits_manager::add_pending_ram_usage( const account_name account, u.ram_usage += ram_delta; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RAM_OP ${action_id} ${event_id} ${family} ${operation} ${legacy_tag} ${payer} ${new_usage} ${delta}", + fc_dlog(*dm_logger, "RAM_OP {action_id} {event_id} {family} {operation} {legacy_tag} {payer} {new_usage} {delta}", ("action_id", trace.action_id) ("event_id", trace.event_id) ("family", trace.family) @@ -324,7 +324,7 @@ bool resource_limits_manager::set_account_limits( const account_name& account, i pending_limits.cpu_weight = cpu_weight; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS UPD ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP ACCOUNT_LIMITS UPD {data}", ("data", pending_limits) ); } @@ -396,7 +396,7 @@ void resource_limits_manager::process_account_limit_updates() { } if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD {data}", ("data", state) ); } @@ -418,7 +418,7 @@ void resource_limits_manager::process_block_usage(uint32_t block_num) { state.pending_net_usage = 0; if (auto dm_logger = _get_deep_mind_logger()) { - fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD ${data}", + fc_dlog(*dm_logger, "RLIMIT_OP STATE UPD {data}", ("data", state) ); } diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp index 8c21ee92ff4..02a9712d04d 100644 --- a/libraries/chain/trace.cpp +++ b/libraries/chain/trace.cpp @@ -1,5 +1,10 @@ #include #include +#include 
+#include +#include +#include + namespace eosio { namespace chain { @@ -35,4 +40,205 @@ action_trace::action_trace( ,producer_block_id( trace.producer_block_id ) {} +namespace trace { + template + void to_trimmed_trace_container_string(string& result, const char* name, const Container& vec, const controller& chain); + + void to_trimmed_string(string& result, const action_trace& at, const controller& chain) { + + result += "\"action_ordinal\":" + std::to_string(at.action_ordinal) + "," + + "\"creator_action_ordinal\":" + std::to_string(at.creator_action_ordinal) + "," + + "\"closest_unnotified_ancestor_action_ordinal\":" + std::to_string(at.closest_unnotified_ancestor_action_ordinal) + ","; + if (at.receipt.has_value()) { + result += "\"receipt\":{"; + result += "\"receiver\":\"" + at.receipt->receiver.to_string() + "\"" + "," + + "\"act_digest\":\"" + at.receipt->act_digest.str() + "\"" + "," + + "\"global_sequence\":" + std::to_string(at.receipt->global_sequence) + "," + + "\"recv_sequence\":" + std::to_string(at.receipt->recv_sequence) + ","; + result += "\"auth_sequence\":["; + auto itr = at.receipt->auth_sequence.find(at.receipt->receiver); + if (itr != at.receipt->auth_sequence.end()){ + result += "[\"" + itr->first.to_string() + "\"," + std::to_string(itr->second) + "]"; + } else { + result += "[]"; + } + result += "],"; + result += "\"code_sequence\":" + std::to_string(at.receipt->code_sequence) + "," + + "\"abi_sequence\":" + std::to_string(at.receipt->abi_sequence); + result += "}"; + } else { + result += "\"receipt\":null"; + } + result += ","; + result += "\"receiver\":\"" + at.receiver.to_string() + "\"" + ","; + + // action trace + const auto& a = at.act; + result += "\"act\":{"; //act begin + result += "\"account\":\"" + a.account.to_string() + "\"," + + "\"name\":\"" + a.name.to_string() + "\","; + to_trimmed_trace_container_string(result, "authorization", a.authorization, chain); + result += ","; + + if( a.account == config::system_account_name && a.name == 
"setcode"_n ) { + auto setcode_act = a.data_as(); + if( setcode_act.code.size() > 0 ) { + result += "\"code_hash\":"; + fc::sha256 code_hash = fc::sha256::hash(setcode_act.code.data(), (uint32_t) setcode_act.code.size()); + result += "\"" + code_hash.str() + "\","; + } + } + + result += "\"data\":"; + abi_serializer::yield_function_t yield = abi_serializer::create_yield_function(chain.get_abi_serializer_max_time()); + auto abi = chain.get_abi_serializer(a.account, yield); + fc::variant output; + if (abi) { + auto type = abi->get_action_type(a.name); + if (!type.empty()) { + try { + output = abi->binary_to_log_variant(type, a.data, yield); + result += fc::json::to_string(output, fc::time_point::maximum()); + result += ","; + result += "\"hex_data\":{"; + } catch (...) { + // any failure to serialize data, then leave as not serialized + result += "{"; + } + } else { + result += "{"; + } + } else { + result += "{"; + } + + result += "\"size\":" + std::to_string(a.data.size()) + ","; + if( a.data.size() > impl::hex_log_max_size ) { + result += "\"trimmed_hex\":\"" + fc::to_hex(std::vector(a.data.begin(), a.data.begin() + impl::hex_log_max_size)) + "\""; + } else { + result += "\"hex\":\"" + fc::to_hex(a.data) + "\""; + } + result += "}"; + result += "}"; //act end + result += ","; + // action trace end + + result = result + "\"context_free\":" + (at.context_free ? 
"true" : "false") + ","; + result += "\"elapsed\":" + std::to_string(at.elapsed.count()) + "," + + "\"console\":\"" + at.console + "\"," + + "\"trx_id\":\"" + at.trx_id.str() + "\"," + + "\"block_num\":" + std::to_string(at.block_num) + "," + + "\"block_time\":\"" + (std::string)at.block_time.to_time_point() + "\"," + + "\"producer_block_id\":"; + if (at.producer_block_id.has_value()) { + result += "\"" + at.producer_block_id->str() + "\""; + } else { + result += "null"; + } + result += ","; + + // account_ram_deltas + to_trimmed_trace_container_string(result, "account_ram_deltas", at.account_ram_deltas, chain); + result += ","; + to_trimmed_trace_container_string(result, "account_disk_deltas", at.account_disk_deltas, chain); + result += ","; + + result += "\"except\":"; + if (at.except.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + result += "\"error_code\":"; + if (at.error_code.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + + result += "\"return_value\":\"" + std::string(at.return_value.begin(), at.return_value.end()) + "\""; + } + + void to_trimmed_string(string& result, const account_delta& ad, const controller& chain) { + result += "\"account\":\"" + ad.account.to_string() + "\"," + + "\"delta\":\"" + std::to_string(ad.delta) + "\""; + } + + void to_trimmed_string(string& result, const permission_level& perm, const controller& chain) { + result += "\"actor\":\"" + perm.actor.to_string() + "\"," + + "\"permission\":\"" + perm.permission.to_string() + "\""; + } + + template + void to_trimmed_trace_container_string(string& result, const char* name, const Container& vec, const controller& chain) { + result = result + "\"" + name + "\":["; + for (const auto& v : vec) { + result += "{"; + to_trimmed_string(result, v, chain); + result += "},"; + } + if (!vec.empty()) + result.pop_back(); //remove the last `,` + result += "]"; + } + + void to_trimmed_trace_string(string& result, const 
transaction_trace& t, const controller& chain) { + result = "{"; + result += "\"id\":\"" + (std::string)t.id + "\"," + + "\"block_num\":" + std::to_string(t.block_num) + "," + + "\"block_time\":\"" + (std::string)t.block_time.to_time_point() + "\"" + "," + + "\"producer_block_id\":" + ( t.producer_block_id.has_value() ? (std::string)t.producer_block_id.value() : "null" ) + ","; + if (t.receipt.has_value()) { + result += "\"receipt\":{"; + result += "\"status\":\"" + (std::string)t.receipt->status + "\"" + "," + + "\"cpu_usage_us\":" + std::to_string(t.receipt->cpu_usage_us) + "," + + "\"net_usage_words\":" + std::to_string(t.receipt->net_usage_words); + result += "}"; + } else{ + result += "null"; + } + result += ","; + result += "\"elapsed\":" + std::to_string(t.elapsed.count()) + "," + + "\"net_usage\":" + std::to_string(t.net_usage) + "," + + "\"scheduled\":" + (t.scheduled ? "true" : "false") + ","; + + // action_trace + to_trimmed_trace_container_string(result, "action_traces", t.action_traces, chain); + result += ","; + + result += "\"account_ram_delta\":"; + if (t.account_ram_delta.has_value()) { + result += "null"; // TODO... + } else { + result += "null"; + } + result += ","; + + result += "\"failed_dtrx_trace\":"; + result += "null"; // TODO... + result += ","; + + result += "\"except\":"; + if (t.except.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + result += "\"error_code\":"; + if (t.error_code.has_value()) { + ;//TODO... + } else { + result += "null"; + } + result += ","; + result += "\"except_ptr\":"; + result += "null"; //TODO... 
+ + result += "}"; + } +} + } } // eosio::chain diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index a3ca65244ab..e3fc9f74e92 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -645,7 +645,7 @@ namespace eosio { namespace chain { if (recurse_depth == 0) { if (auto dm_logger = control.get_deep_mind_logger()) { - fc_dlog(*dm_logger, "CREATION_OP ROOT ${action_id}", + fc_dlog(*dm_logger, "CREATION_OP ROOT {action_id}", ("action_id", get_action_id()) ); } @@ -683,7 +683,7 @@ namespace eosio { namespace chain { event_id = STORAGE_EVENT_ID("${id}", ("id", gto.id)); auto packed_signed_trx = fc::raw::pack(packed_trx.to_packed_transaction_v0()->get_signed_transaction()); - fc_dlog(*dm_logger, "DTRX_OP PUSH_CREATE ${action_id} ${sender} ${sender_id} ${payer} ${published} ${delay} ${expiration} ${trx_id} ${trx}", + fc_dlog(*dm_logger, "DTRX_OP PUSH_CREATE {action_id} {sender} {sender_id} {payer} {published} {delay} {expiration} {trx_id} {trx}", ("action_id", get_action_id()) ("sender", gto.sender.to_string()) ("sender_id", gto.sender_id) diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp index fa32e1d4bab..e5f487043ef 100644 --- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp +++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp @@ -109,7 +109,7 @@ std::tuple code_cache_async::consume_compile_thread_queue() { _cache_index.push_front(cd); }, [&](const compilation_result_unknownfailure&) { - wlog("code ${c} failed to tier-up with EOS VM OC", ("c", result.code.code_id)); + wlog("code {c} failed to tier-up with EOS VM OC", ("c", result.code.code_id)); _blacklist.emplace(result.code); }, [&](const compilation_result_toofull&) { @@ -375,7 +375,7 @@ code_cache_base::code_cache_base(const boost::filesystem::path data_dir, const e } 
allocator->deallocate((char*)load_region.get_address() + cache_header.serialized_descriptor_index); - ilog("EOS VM Optimized Compiler code cache loaded with ${c} entries; ${f} of ${t} bytes free", ("c", number_entries)("f", allocator->get_free_memory())("t", allocator->get_size())); + ilog("EOS VM Optimized Compiler code cache loaded with {c} entries; {f} of {t} bytes free", ("c", number_entries)("f", allocator->get_free_memory())("t", allocator->get_size())); } _free_bytes_eviction_threshold = on_disk_size * .1; diff --git a/libraries/chain_kv/include/b1/session/undo_stack.hpp b/libraries/chain_kv/include/b1/session/undo_stack.hpp index 63f49aaecec..9fa9729047e 100644 --- a/libraries/chain_kv/include/b1/session/undo_stack.hpp +++ b/libraries/chain_kv/include/b1/session/undo_stack.hpp @@ -305,7 +305,7 @@ void undo_stack::close() { out << *value; } else { fc::remove( undo_stack_dat ); // May not be used by next startup - elog( "Did not find value for ${k}", ("k", key.data() ) ); + elog( "Did not find value for {k}", ("k", key.data() ) ); return; // Do not assert as we are during shutdown } } diff --git a/libraries/fc b/libraries/fc index 9f87c6a3c43..5c53bed83fa 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 9f87c6a3c43bd58f7590851762656239310d21d9 +Subproject commit 5c53bed83fa8f8a7e3b88175ff1eabfd4abd19a2 diff --git a/libraries/rodeos/include/b1/rodeos/wasm_ql.hpp b/libraries/rodeos/include/b1/rodeos/wasm_ql.hpp index ba34abc95e5..4ee5b481328 100644 --- a/libraries/rodeos/include/b1/rodeos/wasm_ql.hpp +++ b/libraries/rodeos/include/b1/rodeos/wasm_ql.hpp @@ -78,12 +78,12 @@ class thread_state_cache : public std::enable_shared_from_thisblock_num <= head) { if (!undo_stack_enabled) { - wlog("can't switch forks at ${b} since undo stack is disabled. head: ${h}", ("b", result.this_block->block_num) ("h", head)); + wlog("can't switch forks at {b} since undo stack is disabled. 
head: {h}", ("b", result.this_block->block_num) ("h", head)); EOS_ASSERT(false, eosio::chain::unsupported_feature, "can't switch forks at ${b} since undo stack is disabled. head: ${h}", ("b", result.this_block->block_num) ("h", head)); } else { - ilog("switch forks at block ${b}; database contains revisions ${f} - ${h}", + ilog("switch forks at block {b}; database contains revisions {f} - {h}", ("b", result.this_block->block_num)("f", undo_stack->first_revision())("h", undo_stack->revision())); if (undo_stack->first_revision() >= result.this_block->block_num) throw std::runtime_error("can't switch forks since database doesn't contain revision " + @@ -230,7 +230,7 @@ void rodeos_db_snapshot::write_deltas(uint32_t block_num, eosio::opaque 10000 && !(num_processed % 10000)) { if (shutdown()) throw std::runtime_error("shutting down"); - ilog("block ${b} ${t} ${n} of ${r}", + ilog("block {b} {t} {n} of {r}", ("b", block_num)("t", delta_any_v.name)("n", num_processed)("r", delta_any_v.rows.size())); if (head == 0) { end_write(false); @@ -293,7 +293,7 @@ rodeos_filter::rodeos_filter(eosio::name name, const std::string& wasm_filename, std::ifstream wasm_file(wasm_filename, std::ios::binary); if (!wasm_file.is_open()) throw std::runtime_error("can not open " + wasm_filename); - ilog("compiling ${f}", ("f", wasm_filename)); + ilog("compiling {f}", ("f", wasm_filename)); wasm_file.seekg(0, std::ios::end); int len = wasm_file.tellg(); if (len < 0) @@ -317,7 +317,7 @@ rodeos_filter::rodeos_filter(eosio::name name, const std::string& wasm_filename, cache_path, eosvmoc_config, code, eosio::chain::digest_type::hash(reinterpret_cast(code.data()), code.size())); } catch( const eosio::chain::database_exception& e ) { - wlog( "eosvmoc cache exception ${e} removing cache ${c}", ("e", e.to_string())("c", cache_path.generic_string()) ); + wlog( "eosvmoc cache exception {e} removing cache {c}", ("e", e.to_string())("c", cache_path.generic_string()) ); // destroy cache and try again 
boost::filesystem::remove_all( cache_path ); filter_state->eosvmoc_tierup.emplace( @@ -364,7 +364,7 @@ void rodeos_filter::process(rodeos_db_snapshot& snapshot, const ship_protocol::g (*backend)(cb, "env", "apply", uint64_t(0), uint64_t(0), uint64_t(0)); if (!filter_state->console.empty()) - ilog("filter ${n} console output: <<<\n${c}>>>", ("n", name.to_string())("c", filter_state->console)); + ilog("filter {n} console output: <<<\n{c}>>>", ("n", name.to_string())("c", filter_state->console)); } catch (...) { try { throw; @@ -373,14 +373,14 @@ void rodeos_filter::process(rodeos_db_snapshot& snapshot, const ship_protocol::g } catch ( const boost::interprocess::bad_alloc& ) { throw; } catch( const fc::exception& e ) { - elog( "fc::exception processing filter wasm: ${e}", ("e", e.to_detail_string()) ); + elog( "fc::exception processing filter wasm: {e}", ("e", e.to_detail_string()) ); } catch( const std::exception& e ) { - elog( "std::exception processing filter wasm: ${e}", ("e", e.what()) ); + elog( "std::exception processing filter wasm: {e}", ("e", e.what()) ); } catch( ... 
) { elog( "unknown exception processing filter wasm" ); } if (!filter_state->console.empty()) - ilog("filter ${n} console output before exception: <<<\n${c}>>>", + ilog("filter {n} console output before exception: <<<\n{c}>>>", ("n", name.to_string())("c", filter_state->console)); throw; } diff --git a/libraries/rodeos/wasm_ql.cpp b/libraries/rodeos/wasm_ql.cpp index f3f11303626..c5e67f941fb 100644 --- a/libraries/rodeos/wasm_ql.cpp +++ b/libraries/rodeos/wasm_ql.cpp @@ -174,7 +174,7 @@ std::optional> read_code(wasm_ql::thread_state& thread_stat auto filename = thread_state.shared->contract_dir + "/" + (std::string)account + ".wasm"; std::ifstream wasm_file(filename, std::ios::binary); if (wasm_file.is_open()) { - ilog("compiling ${f}", ("f", filename)); + ilog("compiling {f}", ("f", filename)); wasm_file.seekg(0, std::ios::end); int len = wasm_file.tellg(); if (len < 0) @@ -213,7 +213,7 @@ std::optional> read_contract(db_view_state& db_view_state, // todo: avoid copy result.emplace(code0.code.pos, code0.code.end); - ilog("compiling ${h}: ${a}", ("h", eosio::convert_to_json(hash))("a", (std::string)account)); + ilog("compiling {h}: {a}", ("h", eosio::convert_to_json(hash))("a", (std::string)account)); return result; } @@ -737,7 +737,7 @@ const std::vector& query_create_checkpoint(wasm_ql::thread_state& char buf[30] = "temp"; strftime(buf, 30, "%FT%H-%M-%S", localtime(&t)); auto tmp_path = dir / buf; - ilog("creating checkpoint ${p}", ("p", tmp_path.string())); + ilog("creating checkpoint {p}", ("p", tmp_path.string())); rocksdb::Checkpoint* p; b1::chain_kv::check(rocksdb::Checkpoint::Create(thread_state.shared->db->rdb.get(), &p), @@ -748,7 +748,7 @@ const std::vector& query_create_checkpoint(wasm_ql::thread_state& create_checkpoint_result result; { - ilog("examining checkpoint ${p}", ("p", tmp_path.string())); + ilog("examining checkpoint {p}", ("p", tmp_path.string())); auto db = std::make_shared(tmp_path.c_str(), false); auto partition = std::make_shared(db, 
std::vector{}); rodeos::rodeos_db_snapshot snap{ partition, true }; @@ -763,15 +763,15 @@ const std::vector& query_create_checkpoint(wasm_ql::thread_state& ("-head-" + std::to_string(result.head) + "-" + head_id_json.substr(1, head_id_json.size() - 2)); ilog("checkpoint contains:"); - ilog(" revisions: ${f} - ${r}", + ilog(" revisions: {f} - {r}", ("f", snap.undo_stack->first_revision())("r", snap.undo_stack->revision())); - ilog(" chain: ${a}", ("a", eosio::convert_to_json(snap.chain_id))); - ilog(" head: ${a} ${b}", ("a", snap.head)("b", eosio::convert_to_json(snap.head_id))); - ilog(" irreversible: ${a} ${b}", + ilog(" chain: {a}", ("a", eosio::convert_to_json(snap.chain_id))); + ilog(" head: {a} {b}", ("a", snap.head)("b", eosio::convert_to_json(snap.head_id))); + ilog(" irreversible: {a} {b}", ("a", snap.irreversible)("b", eosio::convert_to_json(snap.irreversible_id))); } - ilog("rename ${a} to ${b}", ("a", tmp_path.string())("b", result.path)); + ilog("rename {a} to {b}", ("a", tmp_path.string())("b", result.path)); boost::filesystem::rename(tmp_path, result.path); auto json = eosio::convert_to_json(result); @@ -780,10 +780,10 @@ const std::vector& query_create_checkpoint(wasm_ql::thread_state& ilog("checkpoint finished"); return thread_state.action_return_value; } catch (const fc::exception& e) { - elog("fc::exception creating snapshot: ${e}", ("e", e.to_detail_string())); + elog("fc::exception creating snapshot: {e}", ("e", e.to_detail_string())); throw; } catch (const std::exception& e) { - elog("std::exception creating snapshot: ${e}", ("e", e.what())); + elog("std::exception creating snapshot: {e}", ("e", e.what())); throw; } catch (...) 
{ diff --git a/libraries/state_history/log.cpp b/libraries/state_history/log.cpp index 0c97998db98..e53345b87f2 100644 --- a/libraries/state_history/log.cpp +++ b/libraries/state_history/log.cpp @@ -51,11 +51,11 @@ state_history_log::state_history_log(const char* const name, const state_history this->ctx.run(); } catch(...) { - fc_elog(logger,"catched exception from ${name} write thread", ("name", this->name)); + fc_elog(logger,"catched exception from {name} write thread", ("name", this->name)); eptr = std::current_exception(); write_thread_has_exception = true; } - fc_ilog(logger,"${name} thread ended", ("name", this->name)); + fc_ilog(logger,"{name} thread ended", ("name", this->name)); }); } @@ -119,20 +119,20 @@ bool state_history_log::get_last_block(uint64_t size) { read_log.seek(size - sizeof(suffix)); read_log.read((char*)&suffix, sizeof(suffix)); if (suffix > size || suffix + state_history_log_header_serial_size > size) { - fc_elog(logger,"corrupt ${name}.log (2)", ("name", name)); + fc_elog(logger,"corrupt {name}.log (2)", ("name", name)); return false; } read_log.seek(suffix); read_header(header, false); if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || suffix + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) != size) { - fc_elog(logger,"corrupt ${name}.log (3)", ("name", name)); + fc_elog(logger,"corrupt {name}.log (3)", ("name", name)); return false; } _end_block = chain::block_header::num_from_id(header.block_id) + 1; last_block_id = header.block_id; if (_begin_block >= _end_block) { - fc_elog(logger,"corrupt ${name}.log (4)", ("name", name)); + fc_elog(logger,"corrupt {name}.log (4)", ("name", name)); return false; } return true; @@ -140,7 +140,7 @@ bool state_history_log::get_last_block(uint64_t size) { // only called from constructor indirectly void state_history_log::recover_blocks(uint64_t size) { - fc_ilog(logger,"recover ${name}.log", ("name", name)); + fc_ilog(logger,"recover {name}.log", 
("name", name)); uint64_t pos = 0; uint32_t num_found = 0; while (true) { @@ -162,7 +162,7 @@ void state_history_log::recover_blocks(uint64_t size) { break; pos = pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix); if (!(++num_found % 10000)) { - fc_dlog(logger,"${num_found} blocks found, log pos = ${pos}", ("num_found", num_found)("pos", pos)); + fc_dlog(logger,"{num_found} blocks found, log pos = {pos}", ("num_found", num_found)("pos", pos)); } } read_log.flush(); @@ -192,10 +192,10 @@ void state_history_log::open_log(bfs::path log_filename) { last_block_id = header.block_id; if (!get_last_block(size)) recover_blocks(size); - fc_ilog(logger,"${name}.log has blocks ${b}-${e}", ("name", name)("b", _begin_block)("e", _end_block - 1)); + fc_ilog(logger,"{name}.log has blocks {b}-{e}", ("name", name)("b", _begin_block)("e", _end_block - 1)); } else { EOS_ASSERT(!size, chain::state_history_exception, "corrupt ${name}.log (5)", ("name", name)); - fc_ilog(logger,"${name}.log is empty", ("name", name)); + fc_ilog(logger,"{name}.log is empty", ("name", name)); } } @@ -206,7 +206,7 @@ void state_history_log::open_index(bfs::path index_filename) { index.seek_end(0); if (index.tellp() == (static_cast(_end_block) - _begin_block) * sizeof(uint64_t)) return; - fc_ilog(logger,"Regenerate ${name}.index", ("name", name)); + fc_ilog(logger,"Regenerate {name}.index", ("name", name)); index.close(); state_history_log_data(read_log.get_file_path()).construct_index(index_filename); @@ -258,14 +258,14 @@ void state_history_log::truncate(state_history_log::block_num_type block_num) { index.close(); index.open("a+b"); - fc_ilog(logger,"fork or replay: removed ${n} blocks from ${name}.log", ("n", num_removed)("name", name)); + fc_ilog(logger,"fork or replay: removed {n} blocks from {name}.log", ("n", num_removed)("name", name)); } // only called from write_entry() std::pair state_history_log::write_entry_header(const state_history_log_header& header, const 
chain::block_id_type& prev_id) { block_num_type block_num = chain::block_header::num_from_id(header.block_id); - fc_dlog(logger,"write_entry_header name=${name} block_num=${block_num}",("name", name) ("block_num", block_num)); + fc_dlog(logger,"write_entry_header name={name} block_num={block_num}",("name", name) ("block_num", block_num)); EOS_ASSERT(_begin_block == _end_block || block_num <= _end_block, chain::state_history_exception, "missed a block in ${name}.log", ("name", name)); @@ -351,7 +351,7 @@ void state_history_log::store_entry(const chain::block_id_type& id, const chain: cached.erase(cached.begin()); } - fc_dlog(logger,"store_entry name=${name}, block_num=${block_num} cached.size = ${sz}, num_buffered_entries=${num_buffered_entries}, id=${id}", + fc_dlog(logger,"store_entry name={name}, block_num={block_num} cached.size = {sz}, num_buffered_entries={num_buffered_entries}, id={id}", ("name", name)("block_num", block_num)("sz", cached.size())("num_buffered_entries", num_buffered_entries)("id", id)); } @@ -368,7 +368,7 @@ void state_history_log::write_entry(const chain::block_id_type& id, const chain: this->write_payload(write_log, *data); lock.lock(); write_entry_position(header, start_pos, block_num); - fc_dlog(logger, "entry block_num=${block_num} id=${id} written", ("block_num", block_num)("id", id)); + fc_dlog(logger, "entry block_num={block_num} id={id} written", ("block_num", block_num)("id", id)); } catch (...) 
{ write_log.close(); boost::filesystem::resize_file(write_log.get_file_path(), start_pos); @@ -509,7 +509,7 @@ void state_history_chain_state_log::store(const chain::combined_database& db, auto [begin, end] = begin_end_block_nums(); bool fresh = begin == end; if (fresh) - fc_ilog(logger,"Placing initial state in block ${n}", ("n", block_state->block->block_num())); + fc_ilog(logger,"Placing initial state in block {n}", ("n", block_state->block->block_num())); using namespace state_history; std::vector deltas = create_deltas(db, fresh); diff --git a/libraries/tpm-helpers/tpm-helpers.cpp b/libraries/tpm-helpers/tpm-helpers.cpp index d710da4306d..9ad0d65c9a4 100644 --- a/libraries/tpm-helpers/tpm-helpers.cpp +++ b/libraries/tpm-helpers/tpm-helpers.cpp @@ -230,7 +230,7 @@ std::map usable_persistent_keys_and_handles ESYS_TR object; rc = Esys_TR_FromTPMPublic(esys_ctx.ctx(), handle, ESYS_TR_NONE, ESYS_TR_NONE, ESYS_TR_NONE, &object); if(rc) { - wlog("Failed to load TPM persistent handle: ${m}", ("m", Tss2_RC_Decode(rc))); + wlog("Failed to load TPM persistent handle: {m}", ("m", Tss2_RC_Decode(rc))); continue; } auto cleanup_tr_object = fc::make_scoped_exit([&]() {Esys_TR_Close(esys_ctx.ctx(), &object);}); diff --git a/plugins/amqp_trx_plugin/amqp_trace_plugin_impl.cpp b/plugins/amqp_trx_plugin/amqp_trace_plugin_impl.cpp index e5c55ed636b..c52935264d1 100644 --- a/plugins/amqp_trx_plugin/amqp_trace_plugin_impl.cpp +++ b/plugins/amqp_trx_plugin/amqp_trace_plugin_impl.cpp @@ -48,7 +48,7 @@ void amqp_trace_plugin_impl::publish_error( std::string routing_key, std::string } else { amqp_trace.publish_message_direct( rk, cid, std::move( buf ), [mode]( const std::string& err ) { - elog( "AMQP direct message error: ${e}", ("e", err) ); + elog( "AMQP direct message error: {e}", ("e", err) ); if( mode == reliable_mode::exit ) appbase::app().quit(); } ); @@ -71,9 +71,9 @@ void amqp_trace_plugin_impl::publish_result( std::string routing_key, rk=std::move(routing_key), 
cid=std::move(correlation_id), uuid=std::move(block_uuid), mode=pub_reliable_mode]() mutable { if( !trace->except ) { - dlog( "chain accepted transaction, bcast ${id}", ("id", trace->id) ); + dlog( "chain accepted transaction, bcast {id}", ("id", trace->id) ); } else { - dlog( "trace except : ${m}", ("m", trace->except->to_string()) ); + dlog( "trace except : {m}", ("m", trace->except->to_string()) ); } transaction_trace_msg msg{ transaction_trace_message{ std::move(uuid), eosio::state_history::convert( *trace ) } }; std::vector buf = convert_to_bin( msg ); @@ -82,7 +82,7 @@ void amqp_trace_plugin_impl::publish_result( std::string routing_key, } else { amqp_trace.publish_message_direct( rk, cid, std::move( buf ), [mode]( const std::string& err ) { - elog( "AMQP direct message error: ${e}", ("e", err) ); + elog( "AMQP direct message error: {e}", ("e", err) ); if( mode == reliable_mode::exit ) appbase::app().quit(); } ); @@ -107,7 +107,7 @@ void amqp_trace_plugin_impl::publish_block_uuid( std::string routing_key, } else { amqp_trace.publish_message_direct( rk, {}, std::move( buf ), [mode]( const std::string& err ) { - elog( "AMQP direct message error: ${e}", ("e", err) ); + elog( "AMQP direct message error: {e}", ("e", err) ); if( mode == reliable_mode::exit ) appbase::app().quit(); } ); diff --git a/plugins/amqp_trx_plugin/amqp_trx_plugin.cpp b/plugins/amqp_trx_plugin/amqp_trx_plugin.cpp index 0fec0f6f885..e52b7ce0d94 100644 --- a/plugins/amqp_trx_plugin/amqp_trx_plugin.cpp +++ b/plugins/amqp_trx_plugin/amqp_trx_plugin.cpp @@ -128,7 +128,7 @@ struct amqp_trx_plugin_impl : std::enable_shared_from_this if( prod_plugin->paused() && started_consuming ) { ilog("Stopping consuming amqp messages during on_block_start"); amqp_trx->stop_consume([](const std::string& consumer_tag){ - dlog("Stopped consuming from amqp tag: ${t}", ("t", consumer_tag)); + dlog("Stopped consuming from amqp tag: {t}", ("t", consumer_tag)); }); started_consuming = false; const bool clear = true; @@ 
-176,7 +176,7 @@ struct amqp_trx_plugin_impl : std::enable_shared_from_this chain::packed_transaction_ptr trx ) { static_assert(std::is_same_v, "fifo_trx_processing_queue assumes delivery_tag is an uint64_t"); const auto& tid = trx->id(); - dlog( "received packed_transaction ${id}", ("id", tid) ); + dlog( "received packed_transaction {id}", ("id", tid) ); auto trx_trace = fc_create_trace_with_id("Transaction", tid); auto trx_span = fc_create_span(trx_trace, "AMQP Received"); @@ -193,7 +193,7 @@ struct amqp_trx_plugin_impl : std::enable_shared_from_this if( std::holds_alternative(result) ) { auto& eptr = std::get(result); fc_add_tag(trx_span, "error", eptr->to_string()); - dlog( "accept_transaction ${id} exception: ${e}", ("id", trx->id())("e", eptr->to_string()) ); + dlog( "accept_transaction {id} exception: {e}", ("id", trx->id())("e", eptr->to_string()) ); if( my->acked == ack_mode::executed || my->acked == ack_mode::in_block ) { // ack immediately on failure my->amqp_trx->ack( delivery_tag ); } @@ -210,12 +210,12 @@ struct amqp_trx_plugin_impl : std::enable_shared_from_this } if( trace->except ) { fc_add_tag(trx_span, "error", trace->except->to_string()); - dlog( "accept_transaction ${id} exception: ${e}", ("id", trx->id())("e", trace->except->to_string()) ); + dlog( "accept_transaction {id} exception: {e}", ("id", trx->id())("e", trace->except->to_string()) ); if( my->acked == ack_mode::executed || my->acked == ack_mode::in_block ) { // ack immediately on failure my->amqp_trx->ack( delivery_tag ); } } else { - dlog( "accept_transaction ${id}", ("id", trx->id()) ); + dlog( "accept_transaction {id}", ("id", trx->id()) ); if( my->acked == ack_mode::executed ) { my->amqp_trx->ack( delivery_tag ); } else if( my->acked == ack_mode::in_block ) { @@ -333,7 +333,7 @@ void amqp_trx_plugin::plugin_startup() { my->trace_plug.amqp_trace.emplace( my->trace_plug.amqp_trace_address, my->trace_plug.amqp_trace_exchange, my->trace_plug.amqp_trace_queue_name, trace_data_file_path, 
[]( const std::string& err ) { - elog( "AMQP fatal error: ${e}", ("e", err) ); + elog( "AMQP fatal error: {e}", ("e", err) ); appbase::app().quit(); } ); @@ -350,7 +350,7 @@ void amqp_trx_plugin::plugin_startup() { fc::microseconds(my->trx_retry_timeout_us), fc::microseconds(my->trx_retry_interval_us), []( const std::string& err ) { - elog( "amqp error: ${e}", ("e", err) ); + elog( "amqp error: {e}", ("e", err) ); app().quit(); } ); diff --git a/plugins/amqp_trx_plugin/include/eosio/amqp_trx_plugin/fifo_trx_processing_queue.hpp b/plugins/amqp_trx_plugin/include/eosio/amqp_trx_plugin/fifo_trx_processing_queue.hpp index f4173993c27..1146b4c6284 100644 --- a/plugins/amqp_trx_plugin/include/eosio/amqp_trx_plugin/fifo_trx_processing_queue.hpp +++ b/plugins/amqp_trx_plugin/include/eosio/amqp_trx_plugin/fifo_trx_processing_queue.hpp @@ -192,7 +192,7 @@ class fifo_trx_processing_queue : public std::enable_shared_from_thisid()) ); + dlog( "posting trx: {id}", ("id", trx_meta->id()) ); app().post( priority::low, [self, trx{std::move( trx_meta )}, next{std::move( i.next )}]() { self->prod_plugin_->execute_incoming_transaction( trx, next ); self->queue_.unpause(); @@ -259,7 +259,7 @@ class fifo_trx_processing_queue : public std::enable_shared_from_thisid()) ); + ilog( "Queue stopped, unable to process transaction {id}, not ack'ed to AMQP", ("id", trx->id()) ); } } diff --git a/plugins/amqp_trx_plugin/test/test_ordered.cpp b/plugins/amqp_trx_plugin/test/test_ordered.cpp index e3cbd088869..c22d98cfdc7 100644 --- a/plugins/amqp_trx_plugin/test/test_ordered.cpp +++ b/plugins/amqp_trx_plugin/test/test_ordered.cpp @@ -70,7 +70,7 @@ struct mock_producer_plugin { bool verify_equal( const std::deque& trxs) { if( trxs.size() != trxs_.size() ) { - elog( "${lhs} != ${rhs}", ("lhs", trxs.size())("rhs", trxs_.size()) ); + elog( "{lhs} != {rhs}", ("lhs", trxs.size())("rhs", trxs_.size()) ); return false; } for( size_t i = 0; i < trxs.size(); ++i ) { diff --git 
a/plugins/amqp_trx_plugin/test/test_ordered_full.cpp b/plugins/amqp_trx_plugin/test/test_ordered_full.cpp index 1786d58b3e9..d045a018049 100644 --- a/plugins/amqp_trx_plugin/test/test_ordered_full.cpp +++ b/plugins/amqp_trx_plugin/test/test_ordered_full.cpp @@ -86,10 +86,10 @@ bool verify_equal( const std::deque& trxs, const std::de const auto& trx = next_trx(); if( trxs[i]->id() != trx.id() ) { - elog( "[${i}],[${j},${k}]: ${lhs} != ${rhs}", ("i", i)("j", j)("k", k) + elog( "[{i}],[{j},{k}]: {lhs} != {rhs}", ("i", i)("j", j)("k", k) ("lhs", trxs[i]->get_transaction().actions.at(0).data_as().id) ("rhs", trx.actions.at(0).data_as().id) ); - elog( "[${i}],[${j},${k}]: ${lhs} != ${rhs}", ("i", i)("j", j)("k", k) + elog( "[{i}],[{j},{k}]: {lhs} != {rhs}", ("i", i)("j", j)("k", k) ("lhs", trxs[i]->id()) ("rhs", trx.id()) ); return false; diff --git a/plugins/chain_plugin/account_query_db.cpp b/plugins/chain_plugin/account_query_db.cpp index 1f74c1d3610..c1e7e10ce3c 100644 --- a/plugins/chain_plugin/account_query_db.cpp +++ b/plugins/chain_plugin/account_query_db.cpp @@ -160,7 +160,7 @@ namespace eosio::chain_apis { add_to_bimaps(*pi, po); } auto duration = fc::time_point::now() - start; - ilog("Finished building account query DB in ${sec}", ("sec", (duration.count() / 1'000'000.0 ))); + ilog("Finished building account query DB in {sec}", ("sec", (duration.count() / 1'000'000.0 ))); } /** diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 89c06a2dcd8..a9a1fd7616d 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -35,8 +35,6 @@ #include #include -#include - // reflect chainbase::environment for --print-build-info option FC_REFLECT_ENUM( chainbase::environment::os_t, (OS_LINUX)(OS_MACOS)(OS_WINDOWS)(OS_OTHER) ) @@ -471,10 +469,10 @@ fc::time_point calculate_genesis_timestamp( string tstr ) { if (diff_us > 0) { auto delay_us = (config::block_interval_us - diff_us); 
genesis_timestamp += fc::microseconds(delay_us); - dlog("pausing ${us} microseconds to the next interval",("us",delay_us)); + dlog("pausing {us} microseconds to the next interval",("us",delay_us)); } - ilog( "Adjusting genesis timestamp to ${timestamp}", ("timestamp", genesis_timestamp) ); + ilog( "Adjusting genesis timestamp to {timestamp}", ("timestamp", genesis_timestamp) ); return genesis_timestamp; } @@ -493,10 +491,10 @@ std::optional read_builtin_protocol_feature( const fc: try { return fc::json::from_file( p ); } catch( const fc::exception& e ) { - wlog( "problem encountered while reading '${path}':\n${details}", + wlog( "problem encountered while reading '{path}':\n{details}", ("path", p.generic_string())("details",e.to_detail_string()) ); } catch( ... ) { - dlog( "unknown problem encountered while reading '${path}'", + dlog( "unknown problem encountered while reading '{path}'", ("path", p.generic_string()) ); } return {}; @@ -525,12 +523,12 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul if( f.subjective_restrictions.enabled ) { if( f.subjective_restrictions.preactivation_required ) { if( f.subjective_restrictions.earliest_allowed_activation_time == time_point{} ) { - ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled with preactivation required", + ilog( "Support for builtin protocol feature '{codename}' (with digest of '{digest}') is enabled with preactivation required", ("codename", builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ); } else { - ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled with preactivation required and with an earliest allowed activation time of ${earliest_time}", + ilog( "Support for builtin protocol feature '{codename}' (with digest of '{digest}') is enabled with preactivation required and with an earliest allowed activation time of {earliest_time}", ("codename", 
builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ("earliest_time", f.subjective_restrictions.earliest_allowed_activation_time) @@ -538,12 +536,12 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul } } else { if( f.subjective_restrictions.earliest_allowed_activation_time == time_point{} ) { - ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled without activation restrictions", + ilog( "Support for builtin protocol feature '{codename}' (with digest of '{digest}') is enabled without activation restrictions", ("codename", builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ); } else { - ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled without preactivation required but with an earliest allowed activation time of ${earliest_time}", + ilog( "Support for builtin protocol feature '{codename}' (with digest of '{digest}') is enabled without preactivation required but with an earliest allowed activation time of {earliest_time}", ("codename", builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ("earliest_time", f.subjective_restrictions.earliest_allowed_activation_time) @@ -551,7 +549,7 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul } } } else { - ilog( "Recognized builtin protocol feature '${codename}' (with digest of '${digest}') but support for it is not enabled", + ilog( "Recognized builtin protocol feature '{codename}' (with digest of '{digest}') but support for it is not enabled", ("codename", builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ); @@ -634,13 +632,13 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul ); if( fc::json::save_to_file( f, file_path ) ) { - ilog( "Saved default specification for builtin protocol feature '${codename}' (with digest of 
'${digest}') to: ${path}", + ilog( "Saved default specification for builtin protocol feature '{codename}' (with digest of '{digest}') to: {path}", ("codename", builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ("path", file_path.generic_string()) ); } else { - elog( "Error occurred while writing default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}", + elog( "Error occurred while writing default specification for builtin protocol feature '{codename}' (with digest of '{digest}') to: {path}", ("codename", builtin_protocol_feature_codename(f.get_codename())) ("digest", feature_digest) ("path", file_path.generic_string()) @@ -734,7 +732,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { try { genesis_state gs; // Check if EOSIO_ROOT_KEY is bad } catch ( const std::exception& ) { - elog( "EOSIO_ROOT_KEY ('${root_key}') is invalid. Recompile with a valid public key.", + elog( "EOSIO_ROOT_KEY ('{root_key}') is invalid. 
Recompile with a valid public key.", ("root_key", genesis_state::eosio_root_key)); throw; } @@ -743,7 +741,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( options.at( "print-build-info" ).as() || options.count( "extract-build-info") ) { if( options.at( "print-build-info" ).as() ) { - ilog( "Build environment JSON:\n${e}", ("e", json::to_pretty_string( chainbase::environment() )) ); + ilog( "Build environment JSON:\n{e}", ("e", json::to_pretty_string( chainbase::environment() )) ); } if( options.count( "extract-build-info") ) { auto p = options.at( "extract-build-info" ).as(); @@ -757,7 +755,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ("path", p.generic_string()) ); - ilog( "Saved build info JSON to '${path}'", ("path", p.generic_string()) ); + ilog( "Saved build info JSON to '{path}'", ("path", p.generic_string()) ); } EOS_THROW( node_management_success, "reported build environment information" ); @@ -937,13 +935,13 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ("path", (my->blocks_dir / "blocks.log").generic_string()) ); } else { - wlog( "No blocks.log found at '${p}'. Using default genesis state.", + wlog( "No blocks.log found at '{p}'. 
Using default genesis state.", ("p", (my->blocks_dir / "blocks.log").generic_string())); gs.emplace(); } if( options.at( "print-genesis-json" ).as()) { - ilog( "Genesis JSON:\n${genesis}", ("genesis", json::to_pretty_string( *gs ))); + ilog( "Genesis JSON:\n{genesis}", ("genesis", json::to_pretty_string( *gs ))); } if( options.count( "extract-genesis-json" )) { @@ -959,7 +957,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ("path", p.generic_string()) ); - ilog( "Saved genesis JSON to '${path}'", ("path", p.generic_string()) ); + ilog( "Saved genesis JSON to '{path}'", ("path", p.generic_string()) ); } EOS_THROW( extract_genesis_state_exception, "extracted genesis state from blocks.log" ); @@ -1078,10 +1076,10 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( options.count( "genesis-timestamp" ) ) { provided_genesis.initial_timestamp = calculate_genesis_timestamp( options.at( "genesis-timestamp" ).as() ); - ilog( "Using genesis state provided in '${genesis}' but with adjusted genesis timestamp", + ilog( "Using genesis state provided in '{genesis}' but with adjusted genesis timestamp", ("genesis", genesis_file.generic_string()) ); } else { - ilog( "Using genesis state provided in '${genesis}'", ("genesis", genesis_file.generic_string())); + ilog( "Using genesis state provided in '{genesis}'", ("genesis", genesis_file.generic_string())); } if( block_log_genesis ) { @@ -1154,7 +1152,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if( my->api_accept_transactions ) { my->api_accept_transactions = false; std::stringstream ss; ss << my->chain_config->read_mode; - wlog( "api-accept-transactions set to false due to read-mode: ${m}", ("m", ss.str()) ); + wlog( "api-accept-transactions set to false due to read-mode: {m}", ("m", ss.str()) ); } } if( my->api_accept_transactions ) { @@ -1258,7 +1256,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if (auto dm_logger = 
my->chain->get_deep_mind_logger()) { auto packed_blk = fc::raw::pack(*blk); - fc_dlog(*dm_logger, "ACCEPTED_BLOCK ${num} ${blk}", + fc_dlog(*dm_logger, "ACCEPTED_BLOCK {num} {blk}", ("num", blk->block_num) ("blk", fc::to_hex(packed_blk)) ); @@ -1285,7 +1283,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if (auto dm_logger = my->chain->get_deep_mind_logger()) { auto packed_trace = fc::raw::pack(*std::get<0>(t)); - fc_dlog(*dm_logger, "APPLIED_TRANSACTION ${block} ${traces}", + fc_dlog(*dm_logger, "APPLIED_TRANSACTION {block} {traces}", ("block", my->chain->head_block_num() + 1) ("traces", fc::to_hex(packed_trace)) ); @@ -1334,11 +1332,11 @@ void chain_plugin::plugin_startup() } if (my->genesis) { - ilog("Blockchain started; head block is #${num}, genesis timestamp is ${ts}", + ilog("Blockchain started; head block is #{num}, genesis timestamp is {ts}", ("num", my->chain->head_block_num())("ts", (std::string)my->genesis->initial_timestamp)); } else { - ilog("Blockchain started; head block is #${num}", ("num", my->chain->head_block_num())); + ilog("Blockchain started; head block is #{num}", ("num", my->chain->head_block_num())); } my->chain_config.reset(); @@ -1430,7 +1428,7 @@ void chain_plugin::log_guard_exception(const chain::guard_exception&e ) { "Please increase the value set for \"reversible-blocks-db-size-mb\" and restart the process!"); } - dlog("Details: ${details}", ("details", e.to_detail_string())); + dlog("Details: {details}", ("details", e.to_detail_string())); } void chain_plugin::handle_guard_exception(const chain::guard_exception& e) { @@ -3494,6 +3492,13 @@ std::string chain_plugin::to_trimmed_trx_string(const transaction& t, const cont return result; } +std::string chain_plugin::get_log_trx_trace(const transaction_trace_ptr& t, const controller& chain) const { + static_assert( fc::reflector<chain::transaction_trace>::total_member_count == 13); + string result; + eosio::chain::trace::to_trimmed_trace_string(result, *t, chain); + return result; +} + }
// namespace eosio diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 20a978f5f5a..524c11360bb 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -1093,6 +1093,7 @@ class chain_plugin : public plugin { fc::variant get_log_trx(const transaction& trx) const; std::string to_trimmed_trx_string(const transaction& t, const controller& chain) const; + std::string get_log_trx_trace(const chain::transaction_trace_ptr& t, const controller& chain) const; private: static void log_guard_exception(const chain::guard_exception& e); diff --git a/plugins/http_client_plugin/http_client_plugin.cpp b/plugins/http_client_plugin/http_client_plugin.cpp index fa052d7ebf8..c2b7405d044 100644 --- a/plugins/http_client_plugin/http_client_plugin.cpp +++ b/plugins/http_client_plugin/http_client_plugin.cpp @@ -38,9 +38,9 @@ void http_client_plugin::plugin_initialize(const variables_map& options) { } catch ( const boost::interprocess::bad_alloc& ) { throw; } catch ( const fc::exception& e ) { - elog( "Failed to read PEM ${f} : ${e}", ("f", root_pem)( "e", e.to_detail_string())); + elog( "Failed to read PEM {f} : {e}", ("f", root_pem)( "e", e.to_detail_string())); } catch ( const std::exception& e ) { - elog( "Failed to read PEM ${f} : ${e}", ("f", root_pem)( "e", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); + elog( "Failed to read PEM {f} : {e}", ("f", root_pem)( "e", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); } } @@ -51,9 +51,9 @@ void http_client_plugin::plugin_initialize(const variables_map& options) { } catch ( const boost::interprocess::bad_alloc& ) { throw; } catch ( const fc::exception& e ) { - elog( "Failed to read PEM : ${e} \n${pem}\n", ("pem", pem_str)( "e", e.to_detail_string())); + elog( "Failed to read PEM : {e} \n{pem}\n", 
("pem", pem_str)( "e", e.to_detail_string())); } catch ( const std::exception& e ) { - elog( "Failed to read PEM : ${e} \n${pem}\n", ("pem", pem_str)( "e", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); + elog( "Failed to read PEM : {e} \n{pem}\n", ("pem", pem_str)( "e", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); } } } diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 964746625b2..1b34074f111 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -281,9 +281,9 @@ class http_plugin_impl : public std::enable_shared_from_this { "!DHE:!RSA:!AES128:!RC4:!DES:!3DES:!DSS:!SRP:!PSK:!EXP:!MD5:!LOW:!aNULL:!eNULL") != 1) EOS_THROW(chain::http_exception, "Failed to set HTTPS cipher list"); } catch (const fc::exception& e) { - fc_elog( logger, "https server initialization error: ${w}", ("w", e.to_detail_string()) ); + fc_elog( logger, "https server initialization error: {w}", ("w", e.to_detail_string()) ); } catch(std::exception& e) { - fc_elog( logger, "https server initialization error: ${w}", ("w", e.what()) ); + fc_elog( logger, "https server initialization error: {w}", ("w", e.what()) ); } return ctx; @@ -299,13 +299,13 @@ class http_plugin_impl : public std::enable_shared_from_this { throw; } catch (const fc::exception& e) { err += e.to_detail_string(); - fc_elog( logger, "${e}", ("e", err)); + fc_elog( logger, "{e}", ("e", err)); error_results results{websocketpp::http::status_code::internal_server_error, "Internal Service Error", error_results::error_info( e, verbose_http_errors )}; con->set_body( fc::json::to_string( results, deadline )); } catch (const std::exception& e) { err += e.what(); - fc_elog( logger, "${e}", ("e", err)); + fc_elog( logger, "{e}", ("e", err)); error_results results{websocketpp::http::status_code::internal_server_error, "Internal Service Error", error_results::error_info( fc::exception( FC_LOG_MESSAGE( 
error, e.what())), @@ -322,10 +322,10 @@ class http_plugin_impl : public std::enable_shared_from_this { } } catch (fc::timeout_exception& e) { con->set_body( R"xxx({"message": "Internal Server Error"})xxx" ); - fc_elog( logger, "Timeout exception ${te} attempting to handle exception: ${e}", ("te", e.to_detail_string())("e", err) ); + fc_elog( logger, "Timeout exception {te} attempting to handle exception: {e}", ("te", e.to_detail_string())("e", err) ); } catch (...) { con->set_body( R"xxx({"message": "Internal Server Error"})xxx" ); - fc_elog( logger, "Exception attempting to handle exception: ${e}", ("e", err) ); + fc_elog( logger, "Exception attempting to handle exception: {e}", ("e", err) ); } con->send_http_response(); } @@ -360,7 +360,7 @@ class http_plugin_impl : public std::enable_shared_from_this { bool verify_max_bytes_in_flight( const T& con ) { auto bytes_in_flight_size = bytes_in_flight.load(); if( bytes_in_flight_size > max_bytes_in_flight ) { - fc_dlog( logger, "429 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight_size) ); + fc_dlog( logger, "429 - too many bytes in flight: {bytes}", ("bytes", bytes_in_flight_size) ); string what = "Too many bytes in flight: " + std::to_string( bytes_in_flight_size ) + ". Try again later.";; report_429_error(con, what); return false; @@ -376,7 +376,7 @@ class http_plugin_impl : public std::enable_shared_from_this { auto requests_in_flight_num = requests_in_flight.load(); if( requests_in_flight_num > max_requests_in_flight ) { - fc_dlog( logger, "429 - too many requests in flight: ${requests}", ("requests", requests_in_flight_num) ); + fc_dlog( logger, "429 - too many requests in flight: {requests}", ("requests", requests_in_flight_num) ); string what = "Too many requests in flight: " + std::to_string( requests_in_flight_num ) + ". 
Try again later."; report_429_error(con, what); return false; @@ -640,7 +640,7 @@ class http_plugin_impl : public std::enable_shared_from_this { std::string body = con->get_request_body(); handler_itr->second( abstract_conn_ptr, std::move( resource ), std::move( body ), make_http_response_handler(abstract_conn_ptr) ); } else { - fc_dlog( logger, "404 - not found: ${ep}", ("ep", resource) ); + fc_dlog( logger, "404 - not found: {ep}", ("ep", resource) ); error_results results{websocketpp::http::status_code::not_found, "Not Found", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Endpoint" )), verbose_http_errors )}; con->set_body( fc::json::to_string( results, fc::time_point::now() + max_response_time )); @@ -664,9 +664,9 @@ class http_plugin_impl : public std::enable_shared_from_this { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ - fc_elog( logger, "http: ${e}", ("e", e.to_detail_string()) ); + fc_elog( logger, "http: {e}", ("e", e.to_detail_string()) ); } catch ( const std::exception& e ){ - fc_elog( logger, "http: ${e}", ("e", e.what()) ); + fc_elog( logger, "http: {e}", ("e", e.what()) ); } catch (...) 
{ fc_elog( logger, "error thrown from http io service" ); } @@ -725,21 +725,21 @@ class http_plugin_impl : public std::enable_shared_from_this { ("access-control-allow-origin", bpo::value()->notifier([this](const string& v) { my->access_control_allow_origin = v; - fc_ilog( logger, "configured http with Access-Control-Allow-Origin: ${o}", + fc_ilog( logger, "configured http with Access-Control-Allow-Origin: {o}", ("o", my->access_control_allow_origin) ); }), "Specify the Access-Control-Allow-Origin to be returned on each request.") ("access-control-allow-headers", bpo::value()->notifier([this](const string& v) { my->access_control_allow_headers = v; - fc_ilog( logger, "configured http with Access-Control-Allow-Headers : ${o}", + fc_ilog( logger, "configured http with Access-Control-Allow-Headers : {o}", ("o", my->access_control_allow_headers) ); }), "Specify the Access-Control-Allow-Headers to be returned on each request.") ("access-control-max-age", bpo::value()->notifier([this](const string& v) { my->access_control_max_age = v; - fc_ilog( logger, "configured http with Access-Control-Max-Age : ${o}", + fc_ilog( logger, "configured http with Access-Control-Max-Age : {o}", ("o", my->access_control_max_age) ); }), "Specify the Access-Control-Max-Age to be returned on each request.") @@ -784,9 +784,9 @@ class http_plugin_impl : public std::enable_shared_from_this { string port = lipstr.substr( host.size() + 1, lipstr.size()); try { my->listen_endpoint = *resolver.resolve( tcp::v4(), host, port ); - ilog( "configured http to listen on ${h}:${p}", ("h", host)( "p", port )); + ilog( "configured http to listen on {h}:{p}", ("h", host)( "p", port )); } catch ( const boost::system::system_error& ec ) { - elog( "failed to configure http to listen on ${h}:${p} (${m})", + elog( "failed to configure http to listen on {h}:{p} ({m})", ("h", host)( "p", port )( "m", ec.what())); } @@ -820,12 +820,12 @@ class http_plugin_impl : public std::enable_shared_from_this { string port = 
lipstr.substr( host.size() + 1, lipstr.size()); try { my->https_listen_endpoint = *resolver.resolve( tcp::v4(), host, port ); - ilog( "configured https to listen on ${h}:${p} (TLS configuration will be validated momentarily)", + ilog( "configured https to listen on {h}:{p} (TLS configuration will be validated momentarily)", ("h", host)( "p", port )); my->https_cert_chain = options.at( "https-certificate-chain-file" ).as(); my->https_key = options.at( "https-private-key-file" ).as(); } catch ( const boost::system::system_error& ec ) { - elog( "failed to configure https to listen on ${h}:${p} (${m})", + elog( "failed to configure https to listen on {h}:{p} ({m})", ("h", host)( "p", port )( "m", ec.what())); } @@ -865,10 +865,10 @@ class http_plugin_impl : public std::enable_shared_from_this { my->server.listen(*my->listen_endpoint); my->server.start_accept(); } catch ( const fc::exception& e ){ - fc_elog( logger, "http service failed to start: ${e}", ("e", e.to_detail_string()) ); + fc_elog( logger, "http service failed to start: {e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - fc_elog( logger, "http service failed to start: ${e}", ("e", e.what()) ); + fc_elog( logger, "http service failed to start: {e}", ("e", e.what()) ); throw; } catch (...) 
{ fc_elog( logger, "error thrown from http io service" ); @@ -888,13 +888,13 @@ class http_plugin_impl : public std::enable_shared_from_this { }); my->unix_server.start_accept(); } catch ( const fc::exception& e ){ - fc_elog( logger, "unix socket service (${path}) failed to start: ${e}", ("e", e.to_detail_string())("path",my->unix_endpoint->path()) ); + fc_elog( logger, "unix socket service ({path}) failed to start: {e}", ("e", e.to_detail_string())("path",my->unix_endpoint->path()) ); throw; } catch ( const std::exception& e ){ - fc_elog( logger, "unix socket service (${path}) failed to start: ${e}", ("e", e.what())("path",my->unix_endpoint->path()) ); + fc_elog( logger, "unix socket service ({path}) failed to start: {e}", ("e", e.what())("path",my->unix_endpoint->path()) ); throw; } catch (...) { - fc_elog( logger, "error thrown from unix socket (${path}) io service", ("path",my->unix_endpoint->path()) ); + fc_elog( logger, "error thrown from unix socket ({path}) io service", ("path",my->unix_endpoint->path()) ); throw; } } @@ -910,10 +910,10 @@ class http_plugin_impl : public std::enable_shared_from_this { my->https_server.listen(*my->https_listen_endpoint); my->https_server.start_accept(); } catch ( const fc::exception& e ){ - fc_elog( logger, "https service failed to start: ${e}", ("e", e.to_detail_string()) ); + fc_elog( logger, "https service failed to start: {e}", ("e", e.to_detail_string()) ); throw; } catch ( const std::exception& e ){ - fc_elog( logger, "https service failed to start: ${e}", ("e", e.what()) ); + fc_elog( logger, "https service failed to start: {e}", ("e", e.what()) ); throw; } catch (...) 
{ fc_elog( logger, "error thrown from https io service" ); @@ -964,12 +964,12 @@ class http_plugin_impl : public std::enable_shared_from_this { } void http_plugin::add_handler(const string& url, const url_handler& handler, int priority) { - fc_ilog( logger, "add api url: ${c}", ("c", url) ); + fc_ilog( logger, "add api url: {c}", ("c", url) ); my->url_handlers[url] = my->make_app_thread_url_handler(priority, handler, my); } void http_plugin::add_async_handler(const string& url, const url_handler& handler) { - fc_ilog( logger, "add api url: ${c}", ("c", url) ); + fc_ilog( logger, "add api url: {c}", ("c", url) ); my->url_handlers[url] = my->make_http_thread_url_handler(handler); } @@ -992,24 +992,24 @@ class http_plugin_impl : public std::enable_shared_from_this { } catch (fc::eof_exception& e) { error_results results{422, "Unprocessable Entity", error_results::error_info(e, verbose_http_errors)}; cb( 422, fc::variant( results )); - fc_elog( logger, "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name ) ); - fc_dlog( logger, "Bad arguments: ${args}", ("args", body) ); + fc_elog( logger, "Unable to parse arguments to {api}.{call}", ("api", api_name)( "call", call_name ) ); + fc_dlog( logger, "Bad arguments: {args}", ("args", body) ); } catch (fc::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(e, verbose_http_errors)}; cb( 500, fc::variant( results )); - fc_dlog( logger, "Exception while processing ${api}.${call}: ${e}", + fc_dlog( logger, "Exception while processing {api}.{call}: {e}", ("api", api_name)( "call", call_name )("e", e.to_detail_string()) ); } catch (std::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors)}; cb( 500, fc::variant( results )); - fc_elog( logger, "STD Exception encountered while processing ${api}.${call}", + fc_elog( logger, "STD Exception encountered 
while processing {api}.{call}", ("api", api_name)( "call", call_name ) ); - fc_dlog( logger, "Exception Details: ${e}", ("e", e.what()) ); + fc_dlog( logger, "Exception Details: {e}", ("e", e.what()) ); } catch (...) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Exception" )), verbose_http_errors)}; cb( 500, fc::variant( results )); - fc_elog( logger, "Unknown Exception encountered while processing ${api}.${call}", + fc_elog( logger, "Unknown Exception encountered while processing {api}.{call}", ("api", api_name)( "call", call_name ) ); } } catch (...) { diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 097cb196b9c..7bd71cad408 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -53,7 +53,7 @@ namespace eosio { template void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { if( !strand.running_in_this_thread() ) { - elog( "wrong strand: ${f} : line ${n}, exiting", ("f", func)("n", line) ); + elog( "wrong strand: {f} : line {n}, exiting", ("f", func)("n", line) ); app().quit(); } } @@ -891,7 +891,7 @@ namespace eosio { last_handshake_recv(), last_handshake_sent() { - fc_ilog( logger, "created connection ${c} to ${n}", ("c", connection_id)("n", endpoint) ); + fc_ilog( logger, "created connection {c} to {n}", ("c", connection_id)("n", endpoint) ); } connection::connection() @@ -933,16 +933,16 @@ namespace eosio { peer_add.substr( colon2 + 1 ) : peer_add.substr( colon2 + 1, end - (colon2 + 1) ); if( type.empty() ) { - fc_dlog( logger, "Setting connection ${c} type for: ${peer} to both transactions and blocks", ("c", connection_id)("peer", peer_add) ); + fc_dlog( logger, "Setting connection {c} type for: {peer} to both transactions and blocks", ("c", connection_id)("peer", peer_add) ); connection_type = both; } else if( type == "trx" ) { - fc_dlog( logger, "Setting connection ${c} 
type for: ${peer} to transactions only", ("c", connection_id)("peer", peer_add) ); + fc_dlog( logger, "Setting connection {c} type for: {peer} to transactions only", ("c", connection_id)("peer", peer_add) ); connection_type = transactions_only; } else if( type == "blk" ) { - fc_dlog( logger, "Setting connection ${c} type for: ${peer} to blocks only", ("c", connection_id)("peer", peer_add) ); + fc_dlog( logger, "Setting connection {c} type for: {peer} to blocks only", ("c", connection_id)("peer", peer_add) ); connection_type = blocks_only; } else { - fc_wlog( logger, "Unknown connection ${c} type: ${t}, for ${peer}", ("c", connection_id)("t", type)("peer", peer_add) ); + fc_wlog( logger, "Unknown connection {c} type: {t}, for {peer}", ("c", connection_id)("t", type)("peer", peer_add) ); } } @@ -1116,21 +1116,21 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr b = cc.fetch_block_by_id( blkid ); if( b ) { - fc_dlog( logger, "fetch_block_by_id num ${n}, connection ${cid}", + fc_dlog( logger, "fetch_block_by_id num {n}, connection {cid}", ("n", b->block_num())("cid", c->connection_id) ); my_impl->dispatcher->add_peer_block( blkid, c->connection_id ); c->strand.post( [c, b{std::move(b)}]() { c->enqueue_block( b ); } ); } else { - fc_ilog( logger, "fetch block by id returned null, id ${id}, connection ${cid}", + fc_ilog( logger, "fetch block by id returned null, id {id}, connection {cid}", ("id", blkid)("cid", c->connection_id) ); } } catch( const assert_exception& ex ) { - fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id}, connection ${cid}", + fc_elog( logger, "caught assert on fetch_block_by_id, {ex}, id {id}, connection {cid}", ("ex", ex.to_string())("id", blkid)("cid", c->connection_id) ); } catch( ... 
) { - fc_elog( logger, "caught other exception fetching block id ${id}, connection ${cid}", + fc_elog( logger, "caught other exception fetching block id {id}, connection {cid}", ("id", blkid)("cid", c->connection_id) ); } }); @@ -1404,7 +1404,7 @@ namespace eosio { static_assert( signed_block_which == fc::get_index() ); // this implementation is to avoid copy of signed_block to net_message // matches which of net_message for signed_block - fc_dlog( logger, "sending block ${bn}", ("bn", sb->block_num()) ); + fc_dlog( logger, "sending block {bn}", ("bn", sb->block_num()) ); return buffer_factory::create_send_buffer( signed_block_which, *sb ); } @@ -1412,7 +1412,7 @@ namespace eosio { static_assert( signed_block_v0_which == fc::get_index() ); // this implementation is to avoid copy of signed_block_v0 to net_message // matches which of net_message for signed_block_v0 - fc_dlog( logger, "sending v0 block ${bn}", ("bn", sb_v0.block_num()) ); + fc_dlog( logger, "sending v0 block {bn}", ("bn", sb_v0.block_num()) ); return buffer_factory::create_send_buffer( signed_block_v0_which, sb_v0 ); } }; @@ -1446,7 +1446,7 @@ namespace eosio { static_assert( trx_message_v1_which == fc::get_index() ); std::optional trx_id; if( trx->get_estimated_size() > 1024 ) { // simple guess on threshold - fc_dlog( logger, "including trx id, est size: ${es}", ("es", trx->get_estimated_size()) ); + fc_dlog( logger, "including trx id, est size: {es}", ("es", trx->get_estimated_size()) ); trx_id = trx->id(); } // const cast required, trx_message_v1 has non-const shared_ptr because FC_REFLECT does not work with const types @@ -1506,7 +1506,7 @@ namespace eosio { [conn{std::move(self)}, close_after_send](boost::system::error_code ec, std::size_t ) { if (ec) return; if (close_after_send != no_reason) { - fc_ilog( logger, "sent a go away message: ${r}, closing connection ${cid}", + fc_ilog( logger, "sent a go away message: {r}, closing connection {cid}", ("r", reason_str(close_after_send))("cid", 
conn->connection_id) ); conn->close(); return; @@ -1618,7 +1618,7 @@ namespace eosio { if( sync_state == newstate ) { return false; } - fc_ilog( logger, "old state ${os} becoming ${ns}", ("os", stage_str( sync_state ))( "ns", stage_str( newstate ) ) ); + fc_ilog( logger, "old state {os} becoming {ns}", ("os", stage_str( sync_state ))( "ns", stage_str( newstate ) ) ); sync_state = newstate; return true; } @@ -1666,11 +1666,11 @@ namespace eosio { std::tie( lib_block_num, std::ignore, fork_head_block_num, std::ignore, std::ignore, std::ignore ) = my_impl->get_chain_info(); - fc_dlog( logger, "sync_last_requested_num: ${r}, sync_next_expected_num: ${e}, sync_known_lib_num: ${k}, sync_req_span: ${s}", + fc_dlog( logger, "sync_last_requested_num: {r}, sync_next_expected_num: {e}, sync_known_lib_num: {k}, sync_req_span: {s}", ("r", sync_last_requested_num)("e", sync_next_expected_num)("k", sync_known_lib_num)("s", sync_req_span) ); if( fork_head_block_num < sync_last_requested_num && sync_source && sync_source->current() ) { - fc_ilog( logger, "ignoring request, head is ${h} last req = ${r}, source connection ${c}", + fc_ilog( logger, "ignoring request, head is {h} last req = {r}, source connection {c}", ("h", fork_head_block_num)("r", sync_last_requested_num)("c", sync_source->connection_id) ); return; } @@ -1775,7 +1775,7 @@ namespace eosio { } bool sync_manager::is_sync_required( uint32_t fork_head_block_num ) { - fc_dlog( logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", + fc_dlog( logger, "last req = {req}, last recv = {recv} known = {known} our head = {head}", ("req", sync_last_requested_num)( "recv", sync_next_expected_num )( "known", sync_known_lib_num ) ("head", fork_head_block_num ) ); @@ -2205,7 +2205,7 @@ namespace eosio { end_size = local_txns.size(); g.unlock(); - fc_dlog( logger, "expire_local_txns size ${s} removed ${r}", ("s", start_size)( "r", start_size - end_size ) ); + fc_dlog( logger, "expire_local_txns size {s} 
removed {r}", ("s", start_size)( "r", start_size - end_size ) ); } void dispatch_manager::expire_blocks( uint32_t lib_num ) { @@ -2216,14 +2216,14 @@ namespace eosio { // thread safe void dispatch_manager::bcast_block(const signed_block_ptr& b, const block_id_type& id) { - fc_dlog( logger, "bcast block ${b}", ("b", b->block_num()) ); + fc_dlog( logger, "bcast block {b}", ("b", b->block_num()) ); if( my_impl->sync_master->syncing_with_peer() ) return; block_buffer_factory buff_factory; const auto bnum = b->block_num(); for_each_block_connection( [this, &id, &bnum, &b, &buff_factory]( auto& cp ) { - fc_dlog( logger, "socket_is_open ${s}, connecting ${c}, syncing ${ss}, connection ${cid}", + fc_dlog( logger, "socket_is_open {s}, connecting {c}, syncing {ss}, connection {cid}", ("s", cp->socket_is_open())("c", cp->connecting.load())("ss", cp->syncing.load())("cid", cp->connection_id) ); if( !cp->current() ) return true; send_buffer_type sb = buff_factory.get_send_buffer( b, cp->protocol_version.load() ); @@ -2274,7 +2274,7 @@ namespace eosio { } void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog( logger, "rejected block ${id}", ("id", id) ); + fc_dlog( logger, "rejected block {id}", ("id", id) ); } void dispatch_manager::bcast_transaction(const packed_transaction_ptr& trx) { @@ -2294,7 +2294,7 @@ namespace eosio { send_buffer_type sb = buff_factory.get_send_buffer( trx, cp->protocol_version.load() ); if( !sb ) return true; - fc_dlog( logger, "sending trx: ${id}, to connection ${cid}", ("id", trx->id())("cid", cp->connection_id) ); + fc_dlog( logger, "sending trx: {id}, to connection {cid}", ("id", trx->id())("cid", cp->connection_id) ); cp->strand.post( [cp, sb{std::move(sb)}]() { cp->enqueue_buffer( sb, no_reason ); } ); @@ -2303,7 +2303,7 @@ namespace eosio { } void dispatch_manager::rejected_transaction(const packed_transaction_ptr& trx, uint32_t head_blk_num) { - fc_dlog( logger, "not sending rejected transaction ${tid}", ("tid", trx->id()) 
); + fc_dlog( logger, "not sending rejected transaction {tid}", ("tid", trx->id()) ); // keep rejected transaction around for awhile so we don't broadcast it // update its block number so it will be purged when current block number is lib if( trx->expiration() > fc::time_point::now() ) { // no need to update blk_num if already expired @@ -2393,13 +2393,13 @@ namespace eosio { case benign_other: break; default: - fc_dlog( logger, "Skipping connect due to go_away reason ${r}",("r", reason_str( no_retry ))); + fc_dlog( logger, "Skipping connect due to go_away reason {r}",("r", reason_str( no_retry ))); return false; } string::size_type colon = peer_address().find(':'); if (colon == std::string::npos || colon == 0) { - fc_elog( logger, "Invalid peer address. must be \"host:port[:|]\": ${p}", ("p", peer_address()) ); + fc_elog( logger, "Invalid peer address. must be \"host:port[:|]\": {p}", ("p", peer_address()) ); return false; } @@ -2430,7 +2430,7 @@ namespace eosio { if( !err ) { c->connect( resolver, endpoints ); } else { - fc_elog( logger, "Unable to resolve ${host}:${port} ${error}", + fc_elog( logger, "Unable to resolve {host}:{port} {error}", ("host", host)("port", port)( "error", err.message() ) ); c->connecting = false; ++c->consecutive_immediate_connection_close; @@ -2461,7 +2461,7 @@ namespace eosio { c->send_handshake(); } } else { - fc_elog( logger, "connection failed to ${host}:${port} ${error}", + fc_elog( logger, "connection failed to {host}:{port} {error}", ("host", endpoint.address().to_string())("port", endpoint.port())( "error", err.message())); c->close( false ); } @@ -2481,7 +2481,7 @@ namespace eosio { const auto& paddr_add = socket->remote_endpoint( rec ).address(); string paddr_str; if( rec ) { - fc_elog( logger, "Error getting remote endpoint: ${m}", ("m", rec.message())); + fc_elog( logger, "Error getting remote endpoint: {m}", ("m", rec.message())); } else { paddr_str = paddr_add.to_string(); for_each_connection( [&visitors, &from_addr, 
&paddr_str]( auto& conn ) { @@ -2506,10 +2506,10 @@ namespace eosio { } else { if( from_addr >= max_nodes_per_host ) { - fc_dlog( logger, "Number of connections (${n}) from ${ra} exceeds limit ${l}", + fc_dlog( logger, "Number of connections ({n}) from {ra} exceeds limit {l}", ("n", from_addr + 1)( "ra", paddr_str )( "l", max_nodes_per_host )); } else { - fc_dlog( logger, "max_client_count ${m} exceeded", ("m", max_client_count)); + fc_dlog( logger, "max_client_count {m} exceeded", ("m", max_client_count)); } // new_connection never added to connections and start_session not called, lifetime will end boost::system::error_code ec; @@ -2518,7 +2518,7 @@ namespace eosio { } } } else { - fc_elog( logger, "Error accepting connection: ${m}", ("m", ec.message())); + fc_elog( logger, "Error accepting connection: {m}", ("m", ec.message())); // For the listed error codes below, recall start_listen_loop() switch (ec.value()) { case ECONNABORTED: @@ -2869,7 +2869,7 @@ namespace eosio { chain_head_blk_id = cc.head_block_id(); chain_fork_head_blk_num = cc.fork_db_pending_head_block_num(); chain_fork_head_blk_id = cc.fork_db_pending_head_block_id(); - fc_dlog( logger, "updating chain info lib ${lib}, head ${head}, fork ${fork}", + fc_dlog( logger, "updating chain info lib {lib}, head {head}, fork {fork}", ("lib", chain_lib_num)("head", chain_head_blk_num)("fork", chain_fork_head_blk_num) ); } @@ -2964,7 +2964,7 @@ namespace eosio { if(check.get() == this) continue; std::unique_lock g_check_conn( check->conn_mtx ); - fc_dlog( logger, "dup check: connected ${c}, ${l} =? ${r}", + fc_dlog( logger, "dup check: connected {c}, {l} =? 
{r}", ("c", check->connected())("l", check->last_handshake_recv.node_id)("r", msg.node_id) ); if(check->connected() && check->last_handshake_recv.node_id == msg.node_id) { if (net_version < dup_goaway_resolution || msg.network_version < dup_goaway_resolution) { @@ -2979,14 +2979,14 @@ namespace eosio { continue; } else if (net_version < dup_node_id_goaway || msg.network_version < dup_node_id_goaway) { if (my_impl->p2p_address < msg.p2p_address) { - fc_dlog( logger, "my_impl->p2p_address '${lhs}' < msg.p2p_address '${rhs}'", + fc_dlog( logger, "my_impl->p2p_address '{lhs}' < msg.p2p_address '{rhs}'", ("lhs", my_impl->p2p_address)( "rhs", msg.p2p_address ) ); // only the connection from lower p2p_address to higher p2p_address will be considered as a duplicate, // so there is no chance for both connections to be closed continue; } } else if (my_impl->node_id < msg.node_id) { - fc_dlog( logger, "not duplicate, my_impl->node_id '${lhs}' < msg.node_id '${rhs}'", + fc_dlog( logger, "not duplicate, my_impl->node_id '{lhs}' < msg.node_id '{rhs}'", ("lhs", my_impl->node_id)("rhs", msg.node_id) ); // only the connection from lower node_id to higher node_id will be considered as a duplicate, // so there is no chance for both connections to be closed @@ -3039,7 +3039,7 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib_num = cc.last_irreversible_block_num(); - fc_dlog( logger, "handshake check for fork lib_num = ${ln}, peer_lib = ${pl}, connection ${cid}", + fc_dlog( logger, "handshake check for fork lib_num = {ln}, peer_lib = {pl}, connection {cid}", ("ln", lib_num)("pl", peer_lib)("cid", c->connection_id) ); if( peer_lib <= lib_num && peer_lib > 0 ) { @@ -3049,10 +3049,10 @@ namespace eosio { on_fork = (msg_lib_id != peer_lib_id); } catch( const unknown_block_exception& ) { // allow this for now, will be checked on sync - fc_dlog( logger, "peer last irreversible block ${pl} is unknown, connection ${cid}", + fc_dlog( logger, "peer last irreversible block 
{pl} is unknown, connection {cid}", ("pl", peer_lib)("cid", c->connection_id) ); } catch( ... ) { - fc_wlog( logger, "caught an exception getting block id for ${pl}, connection ${cid}", + fc_wlog( logger, "caught an exception getting block id for {pl}, connection {cid}", ("pl", peer_lib)("cid", c->connection_id) ); on_fork = true; } @@ -3271,13 +3271,13 @@ namespace eosio { [weak = weak_from_this(), trx](const std::variant& result) mutable { // next (this lambda) called from application thread if (std::holds_alternative(result)) { - fc_dlog( logger, "bad packed_transaction : ${m}", ("m", std::get(result)->what()) ); + fc_dlog( logger, "bad packed_transaction : {m}", ("m", std::get(result)->what()) ); } else { const transaction_trace_ptr& trace = std::get(result); if( !trace->except ) { - fc_dlog( logger, "chain accepted transaction, bcast ${id}", ("id", trace->id) ); + fc_dlog( logger, "chain accepted transaction, bcast {id}", ("id", trace->id) ); } else { - fc_elog( logger, "bad packed_transaction : ${m}", ("m", trace->except->what())); + fc_elog( logger, "bad packed_transaction : {m}", ("m", trace->except->what())); } } connection_ptr conn = weak.lock(); @@ -3341,7 +3341,7 @@ namespace eosio { } fc::microseconds age( fc::time_point::now() - msg->timestamp); - fc_dlog( logger, "received signed_block: #${n} block age in secs = ${age}, connection ${cid}", + fc_dlog( logger, "received signed_block: #{n} block age in secs = {age}, connection {cid}", ("n", blk_num)("age", age.to_seconds())("cid", c->connection_id) ); go_away_reason reason = fatal_other; @@ -3352,27 +3352,27 @@ namespace eosio { reason = no_reason; if( !accepted ) reason = unlinkable; // false if producing or duplicate, duplicate checked above } catch( const unlinkable_block_exception &ex) { - fc_elog(logger, "unlinkable_block_exception connection ${cid}: #${n} ${id}...: ${m}", + fc_elog(logger, "unlinkable_block_exception connection {cid}: #{n} {id}...: {m}", ("cid", c->connection_id)("n", 
blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); reason = unlinkable; } catch( const block_validate_exception &ex ) { - fc_elog(logger, "block_validate_exception connection ${cid}: #${n} ${id}...: ${m}", + fc_elog(logger, "block_validate_exception connection {cid}: #{n} {id}...: {m}", ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); reason = validation; } catch( const assert_exception &ex ) { - fc_elog(logger, "block assert_exception connection ${cid}: #${n} ${id}...: ${m}", + fc_elog(logger, "block assert_exception connection {cid}: #{n} {id}...: {m}", ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); } catch( const fc::exception &ex ) { - fc_elog(logger, "bad block exception connection ${cid}: #${n} ${id}...: ${m}", + fc_elog(logger, "bad block exception connection {cid}: #{n} {id}...: {m}", ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))("m",ex.to_string())); } catch( ... 
) { - fc_elog(logger, "bad block connection ${cid}: #${n} ${id}...: unknown exception", + fc_elog(logger, "bad block connection {cid}: #{n} {id}...: unknown exception", ("cid", c->connection_id)("n", blk_num)("id", blk_id.str().substr(8,16))); } if( reason == no_reason ) { boost::asio::post( my_impl->thread_pool->get_executor(), [dispatcher = my_impl->dispatcher.get(), blk_id, msg]() { - fc_dlog( logger, "accepted signed_block : #${n} ${id}...", ("n", msg->block_num())("id", blk_id.str().substr(8,16)) ); + fc_dlog( logger, "accepted signed_block : #{n} {id}...", ("n", msg->block_num())("id", blk_id.str().substr(8,16)) ); dispatcher->update_txns_block_num( msg ); }); c->strand.post( [sync_master = my_impl->sync_master.get(), dispatcher = my_impl->dispatcher.get(), c, blk_id, blk_num]() { @@ -3405,7 +3405,7 @@ namespace eosio { } else { if( num_in_flight == 0 ) { if( my->in_shutdown ) return; - fc_elog( logger, "Error from connection check monitor: ${m}", ("m", ec.message())); + fc_elog( logger, "Error from connection check monitor: {m}", ("m", ec.message())); my->start_conn_timer( my->connector_period, std::weak_ptr() ); } } @@ -3422,7 +3422,7 @@ namespace eosio { my->expire(); } else { if( my->in_shutdown ) return; - fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message()) ); + fc_elog( logger, "Error from transaction check monitor: {m}", ("m", ec.message()) ); my->start_expire_timer(); } } ); @@ -3437,7 +3437,7 @@ namespace eosio { my->ticker(); if( ec ) { if( my->in_shutdown ) return; - fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); + fc_wlog( logger, "Peer keepalive ticked sooner than expected: {m}", ("m", ec.message()) ); } tstamp current_time = connection::get_time(); @@ -3471,7 +3471,7 @@ namespace eosio { std::tie( lib, std::ignore, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info(); dispatcher->expire_blocks( lib ); dispatcher->expire_txns( lib ); - fc_dlog( logger, 
"expire_txns ${n}us", ("n", time_point::now() - now) ); + fc_dlog( logger, "expire_txns {n}us", ("n", time_point::now() - now) ); start_expire_timer(); } @@ -3489,8 +3489,8 @@ namespace eosio { if (fc::time_point::now() >= max_time) { connection_wptr wit = *it; g.unlock(); - fc_dlog( logger, "Exiting connection monitor early, ran out of time: ${t}", ("t", max_time - fc::time_point::now()) ); - fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}", + fc_dlog( logger, "Exiting connection monitor early, ran out of time: {t}", ("t", max_time - fc::time_point::now()) ); + fc_ilog( logger, "p2p client connections: {num}/{max}, peer connections: {pnum}/{pmax}", ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) ); if( reschedule ) { start_conn_timer( std::chrono::milliseconds( 1 ), wit ); // avoid exhausting @@ -3515,9 +3515,9 @@ namespace eosio { } g.unlock(); if( num_clients > 0 || num_peers > 0 ) - fc_ilog( logger, "p2p client connections: ${num}/${max}, peer connections: ${pnum}/${pmax}", + fc_ilog( logger, "p2p client connections: {num}/{max}, peer connections: {pnum}/{pmax}", ("num", num_clients)("max", max_client_count)("pnum", num_peers)("pmax", supplied_peers.size()) ); - fc_dlog( logger, "connection monitor, removed ${n} connections", ("n", num_rm) ); + fc_dlog( logger, "connection monitor, removed {n} connections", ("n", num_rm) ); if( reschedule ) { start_conn_timer( connector_period, std::weak_ptr()); } @@ -3528,7 +3528,7 @@ namespace eosio { update_chain_info(); controller& cc = chain_plug->chain(); dispatcher->strand.post( [this, bs]() { - fc_dlog( logger, "signaled accepted_block, blk num = ${num}, id = ${id}", ("num", bs->block_num)("id", bs->id) ); + fc_dlog( logger, "signaled accepted_block, blk num = {num}, id = {id}", ("num", bs->block_num)("id", bs->id) ); dispatcher->bcast_block( bs->block, bs->id ); }); } @@ -3540,7 +3540,7 @@ namespace eosio { if( 
cc.is_trusted_producer(block->producer) ) { dispatcher->strand.post( [this, block]() { auto id = block->calculate_id(); - fc_dlog( logger, "signaled pre_accepted_block, blk num = ${num}, id = ${id}", ("num", block->block_num())("id", id) ); + fc_dlog( logger, "signaled pre_accepted_block, blk num = {num}, id = {id}", ("num", block->block_num())("id", id) ); dispatcher->bcast_block( block, id ); }); @@ -3549,7 +3549,7 @@ namespace eosio { // called from application thread void net_plugin_impl::on_irreversible_block( const block_state_ptr& block) { - fc_dlog( logger, "on_irreversible_block, blk num = ${num}, id = ${id}", ("num", block->block_num)("id", block->id) ); + fc_dlog( logger, "on_irreversible_block, blk num = {num}, id = {id}", ("num", block->block_num)("id", block->id) ); update_chain_info(); } @@ -3558,13 +3558,13 @@ namespace eosio { dispatcher->strand.post( [this, results]() { const auto& id = results.second->id(); if (results.first) { - fc_dlog( logger, "signaled NACK, trx-id = ${id} : ${why}", ("id", id)( "why", results.first->to_detail_string() ) ); + fc_dlog( logger, "signaled NACK, trx-id = {id} : {why}", ("id", id)( "why", results.first->to_detail_string() ) ); uint32_t head_blk_num = 0; std::tie( std::ignore, head_blk_num, std::ignore, std::ignore, std::ignore, std::ignore ) = get_chain_info(); dispatcher->rejected_transaction(results.second->packed_trx(), head_blk_num); } else { - fc_dlog( logger, "signaled ACK, trx-id = ${id}", ("id", id) ); + fc_dlog( logger, "signaled ACK, trx-id = {id}", ("id", id) ); dispatcher->bcast_transaction(results.second->packed_trx()); } }); @@ -3584,7 +3584,7 @@ namespace eosio { if(producer_plug != nullptr) found_producer_key = producer_plug->is_producer_key(msg.key); if( allowed_it == allowed_peers.end() && private_it == private_keys.end() && !found_producer_key) { - fc_elog( logger, "Peer ${peer} sent a handshake with an unauthorized key: ${key}.", + fc_elog( logger, "Peer {peer} sent a handshake with an 
unauthorized key: {key}.", ("peer", msg.p2p_address)("key", msg.key.to_string()) ); return false; } @@ -3594,7 +3594,7 @@ namespace eosio { sc::system_clock::duration msg_time(msg.time); auto time = sc::system_clock::now().time_since_epoch(); if(time - msg_time > peer_authentication_interval) { - fc_elog( logger, "Peer ${peer} sent a handshake with a timestamp skewed by more than ${time}.", + fc_elog( logger, "Peer {peer} sent a handshake with a timestamp skewed by more than {time}.", ("peer", msg.p2p_address)("time", "1 second")); // TODO Add to_variant for std::chrono::system_clock::duration return false; } @@ -3602,7 +3602,7 @@ namespace eosio { if(msg.sig != chain::signature_type() && msg.token != sha256()) { sha256 hash = fc::sha256::hash(msg.time); if(hash != msg.token) { - fc_elog( logger, "Peer ${peer} sent a handshake with an invalid token.", ("peer", msg.p2p_address) ); + fc_elog( logger, "Peer {peer} sent a handshake with an invalid token.", ("peer", msg.p2p_address) ); return false; } chain::public_key_type peer_key; @@ -3610,11 +3610,11 @@ namespace eosio { peer_key = crypto::public_key(msg.sig, msg.token, true); } catch (const std::exception& /*e*/) { - fc_elog( logger, "Peer ${peer} sent a handshake with an unrecoverable key.", ("peer", msg.p2p_address) ); + fc_elog( logger, "Peer {peer} sent a handshake with an unrecoverable key.", ("peer", msg.p2p_address) ); return false; } if((allowed_connections & (Producers | Specified)) && peer_key != msg.key) { - fc_elog( logger, "Peer ${peer} sent a handshake with an unauthenticated key.", ("peer", msg.p2p_address) ); + fc_elog( logger, "Peer {peer} sent a handshake with an unauthenticated key.", ("peer", msg.p2p_address) ); return false; } } @@ -3832,7 +3832,7 @@ namespace eosio { if( my->p2p_accept_transactions ) { my->p2p_accept_transactions = false; string m = cc.get_read_mode() == db_read_mode::IRREVERSIBLE ? 
"irreversible" : "read-only"; - wlog( "p2p-accept-transactions set to false due to read-mode: ${m}", ("m", m) ); + wlog( "p2p-accept-transactions set to false due to read-mode: {m}", ("m", m) ); } } if( my->p2p_accept_transactions ) { @@ -3848,7 +3848,7 @@ namespace eosio { handle_sighup(); try { - fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); + fc_ilog( logger, "my node_id is {id}", ("id", my->node_id )); my->producer_plug = app().find_plugin(); @@ -3899,10 +3899,10 @@ namespace eosio { my->acceptor->bind(listen_endpoint); my->acceptor->listen(); } catch (const std::exception& e) { - elog( "net_plugin::plugin_startup failed to bind to port ${port}", ("port", listen_endpoint.port()) ); + elog( "net_plugin::plugin_startup failed to bind to port {port}", ("port", listen_endpoint.port()) ); throw e; } - fc_ilog( logger, "starting listener, max clients is ${mc}",("mc",my->max_client_count) ); + fc_ilog( logger, "starting listener, max clients is {mc}",("mc",my->max_client_count) ); my->start_listen_loop(); } { @@ -3966,10 +3966,10 @@ namespace eosio { } { - fc_ilog( logger, "close ${s} connections", ("s", my->connections.size()) ); + fc_ilog( logger, "close {s} connections", ("s", my->connections.size()) ); std::lock_guard g( my->connections_mtx ); for( auto& con : my->connections ) { - fc_dlog( logger, "close: ${cid}", ("cid", con->connection_id) ); + fc_dlog( logger, "close: {cid}", ("cid", con->connection_id) ); con->close( false, true ); } my->connections.clear(); @@ -4000,9 +4000,9 @@ namespace eosio { return "already connected"; connection_ptr c = std::make_shared( host ); - fc_dlog( logger, "calling active connector: ${h}", ("h", host) ); + fc_dlog( logger, "calling active connector: {h}", ("h", host) ); if( c->resolve_and_connect() ) { - fc_dlog( logger, "adding new connection to the list: ${host} ${cid}", ("host", host)("cid", c->connection_id) ); + fc_dlog( logger, "adding new connection to the list: {host} {cid}", ("host", host)("cid", 
c->connection_id) ); c->set_heartbeat_timeout( my->heartbeat_timeout ); my->connections.insert( c ); } @@ -4013,7 +4013,7 @@ namespace eosio { std::lock_guard g( my->connections_mtx ); for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_address() == host ) { - fc_ilog( logger, "disconnecting: ${cid}", ("cid", (*itr)->connection_id) ); + fc_ilog( logger, "disconnecting: {cid}", ("cid", (*itr)->connection_id) ); (*itr)->close(); my->connections.erase(itr); return "connection removed"; diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp index 3361575ce07..aec342dfb32 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/subjective_billing.hpp @@ -200,7 +200,7 @@ class subjective_billing { num_expired++; } - fc_dlog( log, "Processed ${n} subjective billed transactions, Expired ${expired}", + fc_dlog( log, "Processed {n} subjective billed transactions, Expired {expired}", ("n", orig_count)( "expired", num_expired ) ); } return !exhausted; diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index afb76b5bdd3..f617bebdf19 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -28,6 +28,8 @@ #include #include +#include + namespace bmi = boost::multi_index; using bmi::indexed_by; using bmi::ordered_non_unique; @@ -50,19 +52,19 @@ using boost::signals2::scoped_connection; } catch ( boost::interprocess::bad_alloc& ) { \ chain_plugin::handle_db_exhaustion(); \ } catch( fc::exception& er ) { \ - wlog( "${details}", ("details",er.to_detail_string()) ); \ + wlog( "{details}", ("details",er.to_detail_string()) ); \ } catch( const std::exception& e ) { \ fc::exception fce( \ - FC_LOG_MESSAGE( warn, "std::exception: ${what}: 
",("what",e.what()) ), \ + FC_LOG_MESSAGE( warn, "std::exception: {what}: ",("what",e.what()) ), \ fc::std_exception_code,\ BOOST_CORE_TYPEID(e).name(), \ e.what() ) ; \ - wlog( "${details}", ("details",fce.to_detail_string()) ); \ + wlog( "{details}", ("details",fce.to_detail_string()) ); \ } catch( ... ) { \ fc::unhandled_exception e( \ FC_LOG_MESSAGE( warn, "unknown: ", ), \ std::current_exception() ); \ - wlog( "${details}", ("details",e.to_detail_string()) ); \ + wlog( "{details}", ("details",e.to_detail_string()) ); \ } const std::string logger_name("producer_plugin"); @@ -239,7 +241,7 @@ class producer_plugin_impl : public std::enable_shared_from_this& block_id) { auto& chain = chain_plug->chain(); if ( _pending_block_mode == pending_block_mode::producing) { - fc_wlog( _log, "dropped incoming block #${num} id: ${id}", + fc_wlog( _log, "dropped incoming block #{num} id: {id}", ("num", block->block_num())("id", block_id ? (*block_id).str() : "UNKNOWN") ); return false; } @@ -285,7 +287,7 @@ class producer_plugin_impl : public std::enable_shared_from_thiscalculate_id(); auto blk_num = block->block_num(); - fc_dlog(_log, "received incoming block ${n} ${id}", ("n", blk_num)("id", id)); + fc_dlog(_log, "received incoming block {n} {id}", ("n", blk_num)("id", id)); EOS_ASSERT( block->timestamp < (fc::time_point::now() + fc::seconds( 7 )), block_from_the_future, "received a block from the future, ignoring it: ${id}", ("id", id) ); @@ -327,7 +329,7 @@ class producer_plugin_impl : public std::enable_shared_from_thistimestamp < fc::minutes(5) || (blk_num % 1000 == 0) ) { - ilog("Received block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, conf: ${confs}, latency: ${latency} ms]", + ilog("Received block {id}... 
#{n} @ {t} signed by {p} [trxs: {count}, lib: {lib}, conf: {confs}, latency: {latency} ms]", ("p",block->producer.to_string())("id",id.str().substr(8,16))("n",blk_num)("t",block->timestamp.to_time_point()) ("count",block->transactions.size())("lib",chain.last_irreversible_block_num()) ("confs", block->confirmed)("latency", (fc::time_point::now() - block->timestamp).count()/1000 ) ); if( chain.get_read_mode() != db_read_mode::IRREVERSIBLE && hbs->id != id && hbs->block != nullptr ) { // not applied to head - ilog("Block not applied to head ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, dpos: ${dpos}, conf: ${confs}, latency: ${latency} ms]", + ilog("Block not applied to head {id}... #{n} @ {t} signed by {p} [trxs: {count}, dpos: {dpos}, conf: {confs}, latency: {latency} ms]", ("p",hbs->block->producer.to_string())("id",hbs->id.str().substr(8,16))("n",hbs->block_num)("t",hbs->block->timestamp.to_time_point()) ("count",hbs->block->transactions.size())("dpos", hbs->dpos_irreversible_blocknum) ("confs", hbs->block->confirmed)("latency", (fc::time_point::now() - hbs->block->timestamp).count()/1000 ) ); @@ -379,13 +381,13 @@ class producer_plugin_impl : public std::enable_shared_from_thisid())("a",trx->get_transaction().first_authorizer().to_string())("why",ex->what())); next(ex); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {entire_trx}", // ("entire_trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); -// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {trx}", // ("trx", self->chain_plug->get_log_trx(trx->get_transaction()).as_string())); }; try { @@ -418,65 +420,69 @@ class producer_plugin_impl : public std::enable_shared_from_this(except_ptr, trx)); - auto get_trace = [&](const std::variant& 
response) -> fc::variant { + auto get_trace = [&](const std::variant& response) -> string { if (std::holds_alternative(response)) { - return fc::variant{std::get(response)}; + return ""; // TODO... } else { - return chain_plug->get_log_trx_trace( std::get(response) ); + return chain_plug->get_log_trx_trace(std::get(response), chain); } }; if (except_ptr) { if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${txid}, auth: ${a} : ${why} ", + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Block {block_num} for producer {prod} is REJECTING tx: {txid}, auth: {a} : {why} ", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) ("txid", trx->id()) ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string()) ("why",except_ptr->what())); - fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${trx}", + fc_dlog(_trx_log, "[TRX_TRACE] Block {block_num} for producer {prod} is REJECTING tx: {trx}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) //("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); ("trx", trx->packed_trx()->get_transaction())); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${entire_trace}", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block {block_num} for producer {prod} is REJECTING tx: {entire_trace}", // ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) // ("entire_trace", get_trace(response).as_string())); } else { - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid}, auth: ${a} : ${why} ", + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {txid}, auth: {a} : {why} ", ("txid", trx->id()) ("a", 
trx->packed_trx()->get_transaction().first_authorizer().to_string()) ("why",except_ptr->what())); -// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx} ", +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {trx} ", // ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trace} ", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {entire_trace} ", // ("entire_trace", as_string()))get_trace(response).; } } else { if (_pending_block_mode == pending_block_mode::producing) { - fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${txid}, auth: ${a}", + fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Block {block_num} for producer {prod} is ACCEPTING tx: {txid}, auth: {a}", ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) ("txid", trx->id()) ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string())); -// fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Block {block_num} for producer {prod} is ACCEPTING tx: {trx}", // ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) // ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); -// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${entire_trace}", +// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Block {block_num} for producer {prod} is ACCEPTING tx: {entire_trace}", // ("block_num", chain.head_block_num() + 1)("prod", get_pending_block_producer().to_string()) // ("entire_trace", get_trace(response).as_string())); - _trx_log.get_agent_logger()->set_level(spdlog::level::debug); - fc_dlog(_trx_log, 
"[TRX_TRACE] tx: ${trx}", - ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction(), chain))); +// _trx_log.get_agent_logger()->set_level(spdlog::level::debug); +// fc_dlog(_trx_log, "[TRX_TRACE] tx: ${trx}", +// ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction(), chain))); + _trx_trace_success_log.get_agent_logger()->set_level(spdlog::level::debug); + fc_dlog(_trx_trace_success_log, "[TRX_TRACE] tx: {entire_trace}", + ("entire_trace", get_trace(response))); + } else { - fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}, auth: ${a}", + fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: {txid}, auth: {a}", ("txid", trx->id()) ("a", trx->packed_trx()->get_transaction().first_authorizer().to_string())); -// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: {trx}", // ("trx", chain_plug->get_log_trx(trx->packed_trx()->get_transaction()).as_string())); -// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${entire_trace}", +// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: {entire_trace}", // ("entire_trace", get_trace(response).as_string())); } } @@ -536,17 +542,17 @@ class producer_plugin_impl : public std::enable_shared_from_thisbilled_cpu_time_us, false, sub_bill ); - fc_dlog( _trx_failed_trace_log, "Subjective bill for ${a}: ${b} elapsed ${t}us", ("a",first_auth.to_string())("b",sub_bill)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective bill for {a}: {b} elapsed {t}us", ("a",first_auth.to_string())("b",sub_bill)("t",trace->elapsed)); if( trace->except ) { if( exception_is_exhausted( *trace->except, deadline_is_subjective )) { _unapplied_transactions.add_incoming( trx, persist_until_expired, return_failure_trace, next ); if( _pending_block_mode == 
pending_block_mode::producing ) { - fc_dlog(_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING, ec: ${c} ", + fc_dlog(_log, "[TRX_TRACE] Block {block_num} for producer {prod} COULD NOT FIT, tx: {txid} RETRYING, ec: {c} ", ("block_num", chain.head_block_num() + 1) ("prod", get_pending_block_producer().to_string()) ("txid", trx->id())("c", trace->except->code())); } else { - fc_dlog(_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING, ec: ${c}", + fc_dlog(_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: {txid} RETRYING, ec: {c}", ("txid", trx->id())("c", trace->except->code())); } exhausted = block_is_exhausted(); @@ -779,7 +785,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ auto key_id_to_wif_pair = dejsonify>(key_id_to_wif_pair_string); my->_signature_providers[key_id_to_wif_pair.first] = app().get_plugin().signature_provider_for_private_key(key_id_to_wif_pair.second); auto blanked_privkey = std::string(key_id_to_wif_pair.second.to_string().size(), '*' ); - wlog("\"private-key\" is DEPRECATED, use \"signature-provider=${pub}=KEY:${priv}\"", ("pub",key_id_to_wif_pair.first.to_string())("priv", blanked_privkey)); + wlog("\"private-key\" is DEPRECATED, use \"signature-provider={pub}=KEY:{priv}\"", ("pub",key_id_to_wif_pair.first.to_string())("priv", blanked_privkey)); } catch ( const std::exception& e ) { elog("Malformed private key pair"); } @@ -793,11 +799,11 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ const auto& [pubkey, provider] = app().get_plugin().signature_provider_for_specification(key_spec_pair); my->_signature_providers[pubkey] = provider; } catch(secure_enclave_exception& e) { - elog("Error with Secure Enclave signature provider: ${e}; ignoring ${val}", ("e", e.top_message())("val", key_spec_pair)); + elog("Error with Secure Enclave signature provider: {e}; ignoring {val}", ("e", 
e.top_message())("val", key_spec_pair)); } catch (fc::exception& e) { - elog("Malformed signature provider: \"${val}\": ${e}, ignoring!", ("val", key_spec_pair)("e", e.to_string())); + elog("Malformed signature provider: \"{val}\": {e}, ignoring!", ("val", key_spec_pair)("e", e.to_string())); } catch (...) { - elog("Malformed signature provider: \"${val}\", ignoring!", ("val", key_spec_pair)); + elog("Malformed signature provider: \"{val}\", ignoring!", ("val", key_spec_pair)); } } } @@ -856,7 +862,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ bool disable_subjective_billing = options.at("disable-subjective-billing").as(); my->_disable_subjective_p2p_billing = options.at("disable-subjective-p2p-billing").as(); my->_disable_subjective_api_billing = options.at("disable-subjective-api-billing").as(); - dlog( "disable-subjective-billing: ${s}, disable-subjective-p2p-billing: ${p2p}, disable-subjective-api-billing: ${api}", + dlog( "disable-subjective-billing: {s}, disable-subjective-p2p-billing: {p2p}, disable-subjective-api-billing: {api}", ("s", disable_subjective_billing)("p2p", my->_disable_subjective_p2p_billing)("api", my->_disable_subjective_api_billing) ); if( !disable_subjective_billing ) { my->_disable_subjective_p2p_billing = my->_disable_subjective_api_billing = false; @@ -974,7 +980,7 @@ void producer_plugin::plugin_startup() } if (!my->_producers.empty()) { - ilog("Launching block production for ${n} producers at ${time}.", ("n", my->_producers.size())("time",fc::time_point::now())); + ilog("Launching block production for {n} producers at {time}.", ("n", my->_producers.size())("time",fc::time_point::now())); if (my->_production_enabled) { if (chain.head_block_num() == 0) { @@ -1450,7 +1456,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const auto& hbs = chain.head_block_state(); if( chain.get_terminate_at_block() > 0 && chain.get_terminate_at_block() < chain.head_block_num() ) { 
- ilog("Reached configured maximum block ${num}; terminating", ("num", chain.get_terminate_at_block())); + ilog("Reached configured maximum block {num}; terminating", ("num", chain.get_terminate_at_block())); app().quit(); return start_block_result::failed; } @@ -1482,13 +1488,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } else if( _producers.find(scheduled_producer.producer_name) == _producers.end()) { _pending_block_mode = pending_block_mode::speculating; } else if (num_relevant_signatures == 0) { - elog("Not producing block because I don't have any private keys relevant to authority: ${authority}", ("authority", scheduled_producer.authority)); + elog("Not producing block because I don't have any private keys relevant to authority: {authority}", ("authority", scheduled_producer.authority)); _pending_block_mode = pending_block_mode::speculating; } else if ( _pause_production ) { elog("Not producing block because production is explicitly paused"); _pending_block_mode = pending_block_mode::speculating; } else if ( _max_irreversible_block_age_us.count() >= 0 && irreversible_block_age >= _max_irreversible_block_age_us ) { - elog("Not producing block because the irreversible block is too old [age:${age}s, max:${max}s]", ("age", irreversible_block_age.count() / 1'000'000)( "max", _max_irreversible_block_age_us.count() / 1'000'000 )); + elog("Not producing block because the irreversible block is too old [age:{age}s, max:{max}s]", ("age", irreversible_block_age.count() / 1'000'000)( "max", _max_irreversible_block_age_us.count() / 1'000'000 )); _pending_block_mode = pending_block_mode::speculating; } @@ -1497,13 +1503,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (current_watermark) { const block_timestamp_type block_timestamp{block_time}; if (current_watermark->first > hbs->block_num) { - elog("Not producing block because \"${producer}\" signed a block at a higher block number (${watermark}) 
than the current fork's head (${head_block_num})", + elog("Not producing block because \"{producer}\" signed a block at a higher block number ({watermark}) than the current fork's head ({head_block_num})", ("producer", scheduled_producer.producer_name.to_string()) ("watermark", current_watermark->first) ("head_block_num", hbs->block_num)); _pending_block_mode = pending_block_mode::speculating; } else if (current_watermark->second >= block_timestamp) { - elog("Not producing block because \"${producer}\" signed a block at the next block time or later (${watermark}) than the pending block time (${block_timestamp})", + elog("Not producing block because \"{producer}\" signed a block at the next block time or later ({watermark}) than the pending block time ({block_timestamp})", ("producer", scheduled_producer.producer_name.to_string()) ("watermark", current_watermark->second.to_time_point()) ("block_timestamp", block_timestamp.to_time_point())); @@ -1521,7 +1527,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (_pending_block_mode == pending_block_mode::producing) { const auto start_block_time = block_time - fc::microseconds( config::block_interval_us ); if( now < start_block_time ) { - fc_dlog(_log, "Not producing block waiting for production window ${n} ${bt}", ("n", hbs->block_num + 1)("bt", block_time) ); + fc_dlog(_log, "Not producing block waiting for production window {n} {bt}", ("n", hbs->block_num + 1)("bt", block_time) ); // start_block_time instead of block_time because schedule_delayed_production_loop calculates next block time from given time schedule_delayed_production_loop(weak_from_this(), calculate_producer_wake_up_time(start_block_time)); return start_block_result::waiting_for_production; @@ -1529,12 +1535,12 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } else if (previous_pending_mode == pending_block_mode::producing) { // just produced our last block of our round const auto 
start_block_time = block_time - fc::microseconds( config::block_interval_us ); - fc_dlog(_log, "Not starting speculative block until ${bt}", ("bt", start_block_time) ); + fc_dlog(_log, "Not starting speculative block until {bt}", ("bt", start_block_time) ); schedule_delayed_production_loop( weak_from_this(), start_block_time); return start_block_result::waiting_for_production; } - fc_dlog(_log, "Starting block #${n} at ${time} producer ${p}", + fc_dlog(_log, "Starting block #{n} at {time} producer {p}", ("n", hbs->block_num + 1)("time", now)("p", scheduled_producer.producer_name.to_string())); try { @@ -1570,11 +1576,11 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } catch ( const boost::interprocess::bad_alloc& ) { chain_plugin::handle_bad_alloc(); } catch( const fc::exception& e ) { - wlog( "protocol features to activate are no longer all valid: ${details}", + wlog( "protocol features to activate are no longer all valid: {details}", ("details",e.to_detail_string()) ); drop_features_to_activate = true; } catch( const std::exception& e ) { - wlog( "protocol features to activate are no longer all valid: ${details}", + wlog( "protocol features to activate are no longer all valid: {details}", ("details",fc::std_exception_wrapper::from_current_exception(e).to_detail_string()) ); drop_features_to_activate = true; } @@ -1598,7 +1604,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } std::swap( features_to_activate, protocol_features_to_activate ); _protocol_features_signaled = true; - ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ilog( "signaling activation of the following protocol features in block {num}: {features_to_activate}", ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } } @@ -1611,7 +1617,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { const fc::time_point 
preprocess_deadline = calculate_block_deadline(block_time); if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_authority != scheduled_producer.authority) { - elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); + elog("Unexpected block signing authority, reverting to speculative mode! [expected: \"{expected}\", actual: \"{actual\"", ("expected", scheduled_producer.authority)("actual", pending_block_signing_authority)); _pending_block_mode = pending_block_mode::speculating; } @@ -1680,36 +1686,36 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) if( trx_type == trx_enum_type::persisted ) { if( pbm == pending_block_mode::producing ) { fc_dlog(_trx_failed_trace_log, - "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", + "[TRX_TRACE] Block {block_num} for producer {prod} is EXPIRING PERSISTED tx: {txid}", ("block_num", chain.head_block_num() + 1)("txid", packed_trx_ptr->id()) ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) ); -// fc_dlog(_trx_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Block {block_num} for producer {prod} is EXPIRING PERSISTED tx: {trx}", // ("block_num", chain.head_block_num() + 1) // ("prod", chain.is_building_block() ? 
chain.pending_block_producer().to_string() : name().to_string()) // ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${entire_trx}", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Block {block_num} for producer {prod} is EXPIRING PERSISTED tx: {entire_trx}", // ("block_num", chain.head_block_num() + 1) // ("prod", chain.is_building_block() ? chain.pending_block_producer().to_string() : name().to_string()) // ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } else { - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}", ("txid", packed_trx_ptr->id())); + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: {txid}", ("txid", packed_trx_ptr->id())); -// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: {trx}", // ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${entire_trx}", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: {entire_trx}", // ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } ++num_expired_persistent; } else { if (has_producers) { fc_dlog(_trx_failed_trace_log, - "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}", + "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : {txid}", ("txid", packed_trx_ptr->id())); -// fc_dlog(_trx_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was 
PREVIOUSLY ACCEPTED: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: {trx}", // ("trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: ${entire_trx}", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED: {entire_trx}", // ("entire_trx", chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string())); } ++num_expired_other; @@ -1718,11 +1724,11 @@ bool producer_plugin_impl::remove_expired_trxs( const fc::time_point& deadline ) if( exhausted ) { fc_wlog( _log, "Unable to process all expired transactions in unapplied queue before deadline, " - "Persistent expired ${persistent_expired}, Other expired ${other_expired}", + "Persistent expired {persistent_expired}, Other expired {other_expired}", ("persistent_expired", num_expired_persistent)("other_expired", num_expired_other) ); } else { - fc_dlog( _log, "Processed ${m} expired transactions of the ${n} transactions in the unapplied queue, " - "Persistent expired ${persistent_expired}, Other expired ${other_expired}", + fc_dlog( _log, "Processed {m} expired transactions of the {n} transactions in the unapplied queue, " + "Persistent expired {persistent_expired}, Other expired {other_expired}", ("m", num_expired_persistent+num_expired_other)("n", orig_count) ("persistent_expired", num_expired_persistent)("other_expired", num_expired_other) ); } @@ -1750,7 +1756,7 @@ bool producer_plugin_impl::remove_expired_blacklisted_trxs( const fc::time_point num_expired++; } - fc_dlog(_log, "Processed ${n} blacklisted transactions, Expired ${expired}", + fc_dlog(_log, "Processed {n} blacklisted transactions, Expired {expired}", ("n", orig_count)("expired", num_expired)); } 
return !exhausted; @@ -1797,7 +1803,7 @@ class account_failures { if( !reason.empty() ) reason += ", "; reason += "other"; } - fc_dlog( _log, "Dropped ${n} trxs, account: ${a}, reason: ${r} exceeded", + fc_dlog( _log, "Dropped {n} trxs, account: {a}, reason: {r} exceeded", ("n", e.second.num_failures - max_failures_per_account)("a", e.first.to_string())("r", reason) ); } } @@ -1823,7 +1829,7 @@ class account_failures { ex_flags = set_field( ex_flags, ex_fields::ex_eosio_assert_exception ); } else { ex_flags = set_field( ex_flags, ex_fields::ex_other_exception ); - fc_dlog( _log, "Failed trx, account: ${a}, reason: ${r}", + fc_dlog( _log, "Failed trx, account: {a}, reason: {r}", ("a", n.to_string())("r", exception_code) ); } } @@ -1896,7 +1902,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin const uint32_t sub_bill = 0; auto trace = chain.push_transaction( trx, trx_deadline, prev_billed_cpu_time_us, false, sub_bill ); - fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for ${a}: ${b} prev ${t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for {a}: {b} prev {t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); if( trace->except ) { if( exception_is_exhausted( *trace->except, deadline_is_subjective ) ) { if( block_is_exhausted() ) { @@ -1906,11 +1912,11 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin } // don't erase, subjective failure so try again next time } else { - fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for failed ${a}: ${b} prev ${t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_failed_trace_log, "Subjective unapplied bill for failed {a}: {b} prev {t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); auto failure_code = trace->except->code(); 
if( failure_code != tx_duplicate::code_value ) { // this failed our configured maximum transaction time, we don't want to replay it - fc_dlog( _log, "Failed ${c} trx, prev billed: ${p}us, ran: ${r}us, id: ${id}", + fc_dlog( _log, "Failed {c} trx, prev billed: {p}us, ran: {r}us, id: {id}", ("c", trace->except->code())("p", prev_billed_cpu_time_us) ("r", fc::time_point::now() - start)("id", trx->id()) ); account_fails.add( first_auth, failure_code ); @@ -1928,7 +1934,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin continue; } } else { - fc_dlog( _trx_successful_trace_log, "Subjective unapplied bill for success ${a}: ${b} prev ${t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); + fc_dlog( _trx_successful_trace_log, "Subjective unapplied bill for success {a}: {b} prev {t}us", ("a",first_auth.to_string())("b",prev_billed_cpu_time_us)("t",trace->elapsed)); // if db_read_mode SPECULATIVE then trx is in the pending block and not immediately reverted _subjective_billing.subjective_bill( trx->id(), trx->packed_trx()->expiration(), first_auth, trace->elapsed, chain.get_read_mode() == chain::db_read_mode::SPECULATIVE ); @@ -1943,7 +1949,7 @@ bool producer_plugin_impl::process_unapplied_trxs( const fc::time_point& deadlin ++itr; } - fc_dlog( _log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", + fc_dlog( _log, "Processed {m} of {n} previously applied transactions, Applied {applied}, Failed/Dropped {failed}", ("m", num_processed)( "n", unapplied_trxs_size )("applied", num_applied)("failed", num_failed) ); account_fails.report(); } @@ -2053,7 +2059,7 @@ void producer_plugin_impl::process_scheduled_and_incoming_trxs( const fc::time_p if( scheduled_trxs_size > 0 ) { fc_dlog( _log, - "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + "Processed {m} of {n} scheduled transactions, Applied {applied}, 
Failed/Dropped {failed}", ( "m", num_processed )( "n", scheduled_trxs_size )( "applied", num_applied )( "failed", num_failed ) ); } } @@ -2063,7 +2069,7 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline bool exhausted = false; if( pending_incoming_process_limit ) { size_t processed = 0; - fc_dlog( _log, "Processing ${n} pending transactions", ("n", pending_incoming_process_limit) ); + fc_dlog( _log, "Processing {n} pending transactions", ("n", pending_incoming_process_limit) ); auto itr = _unapplied_transactions.incoming_begin(); auto end = _unapplied_transactions.incoming_end(); while( pending_incoming_process_limit && itr != end ) { @@ -2083,7 +2089,7 @@ bool producer_plugin_impl::process_incoming_trxs( const fc::time_point& deadline break; } } - fc_dlog( _log, "Processed ${n} pending transactions, ${p} left", ("n", processed)("p", _unapplied_transactions.incoming_size()) ); + fc_dlog( _log, "Processed {n} pending transactions, {p} left", ("n", processed)("p", _unapplied_transactions.incoming_size()) ); } return !exhausted; } @@ -2160,12 +2166,12 @@ void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { "producing without pending_block_state, start_block succeeded" ); _timer.expires_at( epoch + boost::posix_time::microseconds( deadline.time_since_epoch().count() ) ); - fc_dlog( _log, "Scheduling Block Production on Normal Block #${num} for ${time}", + fc_dlog( _log, "Scheduling Block Production on Normal Block #{num} for {time}", ("num", chain.head_block_num() + 1)( "time", deadline ) ); } else { EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); _timer.expires_from_now( boost::posix_time::microseconds( 0 ) ); - fc_dlog( _log, "Scheduling Block Production on ${desc} Block #${num} immediately", + fc_dlog( _log, "Scheduling Block Production on {desc} Block #{num} immediately", ("num", chain.head_block_num() + 1)("desc", block_is_exhausted() ? 
"Exhausted" : "Deadline exceeded") ); } @@ -2175,9 +2181,9 @@ void producer_plugin_impl::schedule_maybe_produce_block( bool exhausted ) { if( self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id ) { // pending_block_state expected, but can't assert inside async_wait auto block_num = chain.is_building_block() ? chain.head_block_num() + 1 : 0; - fc_dlog( _log, "Produce block timer for ${num} running at ${time}", ("num", block_num)("time", fc::time_point::now()) ); + fc_dlog( _log, "Produce block timer for {num} running at {time}", ("num", block_num)("time", fc::time_point::now()) ); auto res = self->maybe_produce_block(); - fc_dlog( _log, "Producing Block #${num} returned: ${res}", ("num", block_num)( "res", res ) ); + fc_dlog( _log, "Producing Block #{num} returned: {res}", ("num", block_num)( "res", res ) ); } } ) ); } @@ -2208,7 +2214,7 @@ std::optional producer_plugin_impl::calculate_producer_wake_up_t void producer_plugin_impl::schedule_delayed_production_loop(const std::weak_ptr& weak_this, std::optional wake_up_time) { if (wake_up_time) { - fc_dlog(_log, "Scheduling Speculative/Production Change at ${time}", ("time", wake_up_time.value())); + fc_dlog(_log, "Scheduling Speculative/Production Change at {time}", ("time", wake_up_time.value())); static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1)); _timer.expires_at(epoch + boost::posix_time::microseconds(wake_up_time->time_since_epoch().count())); _timer.async_wait( app().get_priority_queue().wrap( priority::high, @@ -2239,7 +2245,7 @@ bool producer_plugin_impl::maybe_produce_block() { static auto make_debug_time_logger() { auto start = fc::time_point::now(); return fc::make_scoped_exit([=](){ - fc_dlog(_log, "Signing took ${ms}us", ("ms", fc::time_point::now() - start) ); + fc_dlog(_log, "Signing took {ms}us", ("ms", fc::time_point::now() - start) ); }); } @@ -2252,7 +2258,7 @@ static auto maybe_make_debug_time_logger() -> std::optionalchain(); 
EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it"); @@ -2291,19 +2297,19 @@ void producer_plugin_impl::produce_block() { chain.commit_block(); block_state_ptr new_bs = chain.head_block_state(); - ilog("Produced block ${id}... #${n} @ ${t} signed by ${p} [trxs: ${count}, lib: ${lib}, confirmed: ${confs}]", + ilog("Produced block {id}... #{n} @ {t} signed by {p} [trxs: {count}, lib: {lib}, confirmed: {confs}]", ("p",new_bs->header.producer.to_string())("id",new_bs->id.str().substr(8,16)) ("n",new_bs->block_num)("t",new_bs->header.timestamp.to_time_point()) ("count",new_bs->block->transactions.size())("lib",chain.last_irreversible_block_num())("confs", new_bs->header.confirmed)); } void producer_plugin::log_failed_transaction(const transaction_id_type& trx_id, const packed_transaction_ptr& packed_trx_ptr, const char* reason) const { - fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${txid} : ${why}", + fc_dlog(_trx_failed_trace_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {txid} : {why}", ("txid", trx_id)("why", reason)); -// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${trx}", +// fc_dlog(_trx_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {trx}", // ("entire_trx", packed_trx_ptr ? my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); -// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: ${entire_trx}", +// fc_dlog(_trx_trace_failure_log, "[TRX_TRACE] Speculative execution is REJECTING tx: {entire_trx}", // ("entire_trx", packed_trx_ptr ? 
my->chain_plug->get_log_trx(packed_trx_ptr->get_transaction()).as_string() : trx_id)); } diff --git a/plugins/producer_plugin/test/test_trx_full.cpp b/plugins/producer_plugin/test/test_trx_full.cpp index 512854b9c15..954b24a0e17 100644 --- a/plugins/producer_plugin/test/test_trx_full.cpp +++ b/plugins/producer_plugin/test/test_trx_full.cpp @@ -148,7 +148,7 @@ BOOST_AUTO_TEST_CASE(producer) { const size_t num_pushes = 4242; for( size_t i = 1; i <= num_pushes; ++i ) { auto ptrx = make_unique_trx( chain_id ); - dlog( "posting ${id}", ("id", ptrx->id()) ); + dlog( "posting {id}", ("id", ptrx->id()) ); app().post( priority::low, [ptrx, &next_calls, &num_posts, &trace_with_except, &trx_match, &trxs]() { ++num_posts; bool return_failure_traces = false; // not supported in version 2.1.x, in 2.2.x+ = num_posts % 2 == 0; @@ -162,12 +162,12 @@ BOOST_AUTO_TEST_CASE(producer) { if( std::get( result )->id == ptrx->id() ) { trxs.push_back( ptrx ); } else { - elog( "trace not for trx ${id}: ${t}", + elog( "trace not for trx {id}: {t}", ("id", ptrx->id())("t", fc::json::to_pretty_string(*std::get(result))) ); trx_match = false; } } else if( !return_failure_traces && !std::holds_alternative( result ) && std::get( result )->except ) { - elog( "trace with except ${e}", + elog( "trace with except {e}", ("e", fc::json::to_pretty_string( *std::get( result ) )) ); ++trace_with_except; } diff --git a/plugins/resource_monitor_plugin/include/eosio/resource_monitor_plugin/file_space_handler.hpp b/plugins/resource_monitor_plugin/include/eosio/resource_monitor_plugin/file_space_handler.hpp index 994dd5780d0..0f38a8d526b 100644 --- a/plugins/resource_monitor_plugin/include/eosio/resource_monitor_plugin/file_space_handler.hpp +++ b/plugins/resource_monitor_plugin/include/eosio/resource_monitor_plugin/file_space_handler.hpp @@ -49,7 +49,7 @@ namespace eosio::resource_monitor { // As the system is running and this plugin is not a critical // part of the system, we should not exit. 
// Just report the failure and continue; - wlog( "Unable to get space info for ${path_name}: [code: ${ec}] ${message}. Ignore this failure.", + wlog( "Unable to get space info for {path_name}: [code: {ec}] {message}. Ignore this failure.", ("path_name", fs.path_name.string()) ("ec", ec.value()) ("message", ec.message())); @@ -59,13 +59,13 @@ namespace eosio::resource_monitor { if ( info.available < fs.shutdown_available ) { if (output_threshold_warning) { - wlog("Space usage warning: ${path}'s file system exceeded threshold ${threshold}%, available: ${available}, Capacity: ${capacity}, shutdown_available: ${shutdown_available}", ("path", fs.path_name.string()) ("threshold", shutdown_threshold) ("available", info.available) ("capacity", info.capacity) ("shutdown_available", fs.shutdown_available)); + wlog("Space usage warning: {path}'s file system exceeded threshold {threshold}%, available: {available}, Capacity: {capacity}, shutdown_available: {shutdown_available}", ("path", fs.path_name.string()) ("threshold", shutdown_threshold) ("available", info.available) ("capacity", info.capacity) ("shutdown_available", fs.shutdown_available)); } return true; } else if ( info.available < fs.warning_available && output_threshold_warning ) { - wlog("Space usage warning: ${path}'s file system approaching threshold. available: ${available}, warning_available: ${warning_available}", ("path", fs.path_name.string()) ("available", info.available) ("warning_available", fs.warning_available)); + wlog("Space usage warning: {path}'s file system approaching threshold. 
available: {available}, warning_available: {warning_available}", ("path", fs.path_name.string()) ("available", info.available) ("warning_available", fs.warning_available)); if ( shutdown_on_exceeded) { - wlog("nodeos will shutdown when space usage exceeds threshold ${threshold}%", ("threshold", shutdown_threshold)); + wlog("nodeos will shutdown when space usage exceeds threshold {threshold}%", ("threshold", shutdown_threshold)); } } } @@ -80,13 +80,13 @@ namespace eosio::resource_monitor { EOS_ASSERT(status == 0, chain::plugin_config_exception, "Failed to run stat on ${path} with status ${status}", ("path", path_name.string())("status", status)); - dlog("${path_name}'s file system to be monitored", ("path_name", path_name.string())); + dlog("{path_name}'s file system to be monitored", ("path_name", path_name.string())); // If the file system containing the path is already // in the filesystem list, do not add it again for (auto& fs: filesystems) { if (statbuf.st_dev == fs.st_dev) { // Two files belong to the same file system if their device IDs are the same. - dlog("${path_name}'s file system already monitored", ("path_name", path_name.string())); + dlog("{path_name}'s file system already monitored", ("path_name", path_name.string())); return; } @@ -109,7 +109,7 @@ namespace eosio::resource_monitor { // Add to the list filesystems.emplace_back(statbuf.st_dev, shutdown_available, path_name, warning_available); - ilog("${path_name}'s file system monitored. shutdown_available: ${shutdown_available}, capacity: ${capacity}, threshold: ${threshold}", ("path_name", path_name.string()) ("shutdown_available", shutdown_available) ("capacity", info.capacity) ("threshold", shutdown_threshold) ); + ilog("{path_name}'s file system monitored. 
shutdown_available: {shutdown_available}, capacity: {capacity}, threshold: {threshold}", ("path_name", path_name.string()) ("shutdown_available", shutdown_available) ("capacity", info.capacity) ("threshold", shutdown_threshold) ); } void space_monitor_loop() { @@ -124,7 +124,7 @@ namespace eosio::resource_monitor { timer.async_wait([this](auto& ec) { if ( ec ) { - wlog("Exit due to error: ${ec}, message: ${message}", + wlog("Exit due to error: {ec}, message: {message}", ("ec", ec.value()) ("message", ec.message())); return; diff --git a/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp b/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp index 3e8894fe782..5ecd34b20f2 100644 --- a/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp +++ b/plugins/resource_monitor_plugin/resource_monitor_plugin.cpp @@ -64,13 +64,13 @@ class resource_monitor_plugin_impl { EOS_ASSERT(interval >= monitor_interval_min && interval <= monitor_interval_max, chain::plugin_config_exception, "\"resource-monitor-interval-seconds\" must be between ${monitor_interval_min} and ${monitor_interval_max}", ("monitor_interval_min", monitor_interval_min) ("monitor_interval_max", monitor_interval_max)); space_handler.set_sleep_time(interval); - ilog("Monitoring interval set to ${interval}", ("interval", interval)); + ilog("Monitoring interval set to {interval}", ("interval", interval)); auto threshold = options.at("resource-monitor-space-threshold").as(); EOS_ASSERT(threshold >= space_threshold_min && threshold <= space_threshold_max, chain::plugin_config_exception, "\"resource-monitor-space-threshold\" must be between ${space_threshold_min} and ${space_threshold_max}", ("space_threshold_min", space_threshold_min) ("space_threshold_max", space_threshold_max)); space_handler.set_threshold(threshold, threshold - space_threshold_warning_diff); - ilog("Space usage threshold set to ${threshold}", ("threshold", threshold)); + ilog("Space usage threshold set to {threshold}", 
("threshold", threshold)); if (options.count("resource-monitor-not-shutdown-on-threshold-exceeded")) { // If set, not shutdown @@ -86,7 +86,7 @@ class resource_monitor_plugin_impl { EOS_ASSERT(warning_interval >= warning_interval_min && warning_interval <= warning_interval_max, chain::plugin_config_exception, "\"resource-monitor-warning-interval\" must be between ${warning_interval_min} and ${warning_interval_max}", ("warning_interval_min", warning_interval_min) ("warning_interval_max", warning_interval_max)); space_handler.set_warning_interval(warning_interval); - ilog("Warning interval set to ${warning_interval}", ("warning_interval", warning_interval)); + ilog("Warning interval set to {warning_interval}", ("warning_interval", warning_interval)); } // Start main thread @@ -130,7 +130,7 @@ class resource_monitor_plugin_impl { } void monitor_directory(const bfs::path& path) { - dlog("${path} registered to be monitored", ("path", path.string())); + dlog("{path} registered to be monitored", ("path", path.string())); directories_registered.push_back(path); } diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index bc62d6b7673..e6329176051 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -39,9 +39,9 @@ auto catch_and_log(F f) { try { return f(); } catch (const fc::exception& e) { - fc_elog(_log, "${e}", ("e", e.to_detail_string())); + fc_elog(_log, "{e}", ("e", e.to_detail_string())); } catch (const std::exception& e) { - fc_elog(_log, "${e}", ("e", e.what())); + fc_elog(_log, "{e}", ("e", e.what())); } catch (...) 
{ fc_elog(_log, "unknown exception"); } @@ -145,14 +145,14 @@ struct state_history_plugin_impl : std::enable_shared_from_thisget_block_id(cp.block_num); if (!id) { to_send_block_num = std::min(to_send_block_num, cp.block_num); - fc_dlog(_log, "block ${block_num} is not available", ("block_num", cp.block_num)); + fc_dlog(_log, "block {block_num} is not available", ("block_num", cp.block_num)); } else if (*id != cp.block_id) { to_send_block_num = std::min(to_send_block_num, cp.block_num); - fc_dlog(_log, "the id for block ${block_num} in block request have_positions does not match the existing", ("block_num", cp.block_num)); + fc_dlog(_log, "the id for block {block_num} in block request have_positions does not match the existing", ("block_num", cp.block_num)); } } - fc_dlog(_log, " get_blocks_request start_block_num set to ${num}", ("num", to_send_block_num)); + fc_dlog(_log, " get_blocks_request start_block_num set to {num}", ("num", to_send_block_num)); if (req.have_positions.size()) { position_it = req.have_positions.begin(); @@ -163,7 +163,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this current || to_send_block_num >= block_req.end_block_num) { - fc_dlog( _log, "Not sending, to_send_block_num: ${s}, current: ${c} block_req.end_block_num: ${b}", + fc_dlog( _log, "Not sending, to_send_block_num: {s}, current: {c} block_req.end_block_num: {b}", ("s", to_send_block_num)("c", current)("b", block_req.end_block_num) ); return; } @@ -263,7 +263,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thistimestamp < fc::minutes(5); if( fresh_block || (result.this_block && result.this_block->block_num % 1000 == 0) ) { - fc_ilog(_log, "pushing result {\"head\":{\"block_num\":${head}},\"last_irreversible\":{\"block_num\":${last_irr}},\"this_block\":{\"block_num\":${this_block}, \"id\": ${id}}} to send queue", + fc_ilog(_log, "pushing result 
{\"head\":{\"block_num\":{head}},\"last_irreversible\":{\"block_num\":{last_irr}},\"this_block\":{\"block_num\":{this_block}, \"id\": {id}}} to send queue", ("head", result.head.block_num)("last_irr", result.last_irreversible.block_num) ("this_block", result.this_block ? result.this_block->block_num : fc::variant().as_uint64()) ("id", block_id ? block_id->_hash[3] : 0 )); @@ -393,10 +393,10 @@ struct state_history_plugin_impl : std::enable_shared_from_thisplugin->stopping) return; if (ec) { - fc_elog(_log, "${w}: ${m}", ("w", what)("m", ec.message())); + fc_elog(_log, "{w}: {m}", ("w", what)("m", ec.message())); close_i(); return; } @@ -419,7 +419,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisplugin->sessions.remove(this->shared_from_this()); } @@ -466,7 +466,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisget_scheduled_producer(block_time); const auto producer_name = producer_authority.producer_name; if (_producer != account_name()) - ilog("producer ${cprod}, looking for ${lprod}", ("cprod", producer_name.to_string())("lprod", _producer.to_string())); + ilog("producer {cprod}, looking for {lprod}", ("cprod", producer_name.to_string())("lprod", _producer.to_string())); // start counting sequences for this producer (once we have a sequence that we saw the initial block for that producer) if (producer_name == _producer && _clean_producer_sequence) { auto slot = bsp->block->timestamp.slot; _producer_sequence += 1; - ilog("producer ${prod} seq: ${seq} slot: ${slot}", + ilog("producer {prod} seq: {seq} slot: {slot}", ("prod", producer_name.to_string()) ("seq", _producer_sequence+1) // _producer_sequence is index, aligning it with slot number ("slot", slot - _first_sequence_timeslot)); @@ -83,7 +83,7 @@ void test_control_plugin_impl::process_next_block_state(const chain::block_state if (_producer_sequence >= _where_in_sequence || last_slot) { int32_t slot_index = slot - _first_sequence_timeslot; if (last_slot && slot_index > 
_producer_sequence + 1){ - wlog("Producer produced less than ${n} blocks, ${l}th block is last in sequence. Likely performance issue, check timing", + wlog("Producer produced less than {n} blocks, {l}th block is last in sequence. Likely performance issue, check timing", ("n", chain::config::producer_repetitions)("l", _producer_sequence + 1)); } ilog("shutting down"); @@ -143,10 +143,10 @@ namespace test_control_apis { read_write::kill_node_on_producer_results read_write::kill_node_on_producer(const read_write::kill_node_on_producer_params& params) const { if (params.based_on_lib) { - ilog("kill on lib for producer: ${p} at their ${s} slot in sequence", ("p", params.producer.to_string())("s", params.where_in_sequence)); + ilog("kill on lib for producer: {p} at their {s} slot in sequence", ("p", params.producer.to_string())("s", params.where_in_sequence)); my->kill_on_lib(params.producer, params.where_in_sequence); } else { - ilog("kill on head for producer: ${p} at their ${s} slot in sequence", ("p", params.producer.to_string())("s", params.where_in_sequence)); + ilog("kill on head for producer: {p} at their {s} slot in sequence", ("p", params.producer.to_string())("s", params.where_in_sequence)); my->kill_on_head(params.producer, params.where_in_sequence); } return read_write::kill_node_on_producer_results{}; diff --git a/plugins/trace_api_plugin/store_provider.cpp b/plugins/trace_api_plugin/store_provider.cpp index f4915772eb6..a7a9c1b3b25 100644 --- a/plugins/trace_api_plugin/store_provider.cpp +++ b/plugins/trace_api_plugin/store_provider.cpp @@ -265,7 +265,7 @@ namespace eosio::trace_api { if (trace_found != index_found) { const std::string trace_status = trace_found ? "existing" : "new"; const std::string index_status = index_found ? "existing" : "new"; - elog("Trace file is ${ts}, but it's metadata file is ${is}. This means the files are not consistent.", ("ts", trace_status)("is", index_status)); + elog("Trace file is {ts}, but it's metadata file is {is}. 
This means the files are not consistent.", ("ts", trace_status)("is", index_status)); } } diff --git a/plugins/trace_api_plugin/trace_api_plugin.cpp b/plugins/trace_api_plugin/trace_api_plugin.cpp index 274de454665..40986f73a43 100644 --- a/plugins/trace_api_plugin/trace_api_plugin.cpp +++ b/plugins/trace_api_plugin/trace_api_plugin.cpp @@ -29,7 +29,7 @@ namespace { return er.to_detail_string(); } catch (const std::exception& e) { fc::exception fce( - FC_LOG_MESSAGE(warn, "std::exception: ${what}: ", ("what", e.what())), + FC_LOG_MESSAGE(warn, "std::exception: {what}: ", ("what", e.what())), fc::std_exception_code, BOOST_CORE_TYPEID(e).name(), e.what()); @@ -215,7 +215,7 @@ struct trace_api_rpc_plugin_impl : public std::enable_shared_from_thisadd_abi(account, abi); } catch (...) { - elog("Malformed trace-rpc-abi provider: \"${val}\"", ("val", entry)); + elog("Malformed trace-rpc-abi provider: \"{val}\"", ("val", entry)); throw; } } diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 1f22baedb0c..2207abe2b22 100644 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -313,7 +313,7 @@ struct txn_test_gen_plugin_impl { thread_pool.emplace( "txntest", thread_pool_size ); timer = std::make_shared(thread_pool->get_executor()); - ilog("Started transaction test plugin; generating ${p} transactions every ${m} ms by ${t} load generation threads", + ilog("Started transaction test plugin; generating {p} transactions every {m} ms by {t} load generation threads", ("p", batch_size) ("m", period) ("t", thread_pool_size)); boost::asio::post( thread_pool->get_executor(), [this]() { @@ -327,7 +327,7 @@ struct txn_test_gen_plugin_impl { boost::asio::post( thread_pool->get_executor(), [this]() { send_transaction([this](const fc::exception_ptr& e){ if (e) { - elog("pushing transaction failed: ${e}", ("e", e->to_detail_string())); + elog("pushing 
transaction failed: {e}", ("e", e->to_detail_string())); if(running && stop_on_trx_failed) stop_generation(); } @@ -412,7 +412,7 @@ struct txn_test_gen_plugin_impl { ilog("Stopping transaction generation test"); if (_txcount) { - ilog("${d} transactions executed, ${t}us / transaction", ("d", _txcount)("t", _total_us / (double)_txcount)); + ilog("{d} transactions executed, {t}us / transaction", ("d", _txcount)("t", _total_us / (double)_txcount)); _txcount = _total_us = 0; } } diff --git a/plugins/wallet_plugin/wallet.cpp b/plugins/wallet_plugin/wallet.cpp index c0dbd4b9721..986abbbb721 100644 --- a/plugins/wallet_plugin/wallet.cpp +++ b/plugins/wallet_plugin/wallet.cpp @@ -87,7 +87,7 @@ class soft_wallet_impl ++suffix; dest_path = destination_filename + "-" + std::to_string( suffix ) + _wallet_filename_extension; } - wlog( "backing up wallet ${src} to ${dest}", + wlog( "backing up wallet {src} to {dest}", ("src", src_path.string()) ("dest", dest_path.string()) ); @@ -215,7 +215,7 @@ class soft_wallet_impl if( wallet_filename == "" ) wallet_filename = _wallet_filename; - wlog( "saving wallet to file ${fn}", ("fn", wallet_filename) ); + wlog( "saving wallet to file {fn}", ("fn", wallet_filename) ); string data = fc::json::to_pretty_string( _wallet ); try @@ -229,7 +229,7 @@ class soft_wallet_impl // ofstream outfile{ wallet_filename }; if (!outfile) { - elog("Unable to open file: ${fn}", ("fn", wallet_filename)); + elog("Unable to open file: {fn}", ("fn", wallet_filename)); EOS_THROW(wallet_exception, "Unable to open file: ${fn}", ("fn", wallet_filename)); } outfile.write( data.c_str(), data.length() ); diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 77f9fe70037..cf66a39fe0d 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -2754,7 +2754,7 @@ int main( int argc, char** argv ) { std::optional chain_id; if( str_chain_id.size() == 0 ) { - ilog( "grabbing chain_id from ${n}", ("n", node_executable_name) ); + ilog( "grabbing 
chain_id from {n}", ("n", node_executable_name) ); auto info = get_info(); chain_id = info.chain_id; } else { @@ -3180,9 +3180,9 @@ int main( int argc, char** argv ) { out << args ;//<< "\n"; if( trace["block_num"].as_uint64() <= lib ) { - dlog( "\r${m}", ("m",out.str()) ); + dlog( "\r{m}", ("m",out.str()) ); } else { - wlog( "\r${m}", ("m",out.str()) ); + wlog( "\r{m}", ("m",out.str()) ); } if( printconsole ) { auto console = at["console"].as_string(); @@ -3195,7 +3195,7 @@ int main( int argc, char** argv ) { if( !fullact ) break; line.clear(); } - cerr << sout.str(); //ilog( "\r${m} ", ("m",out.str()) ); + cerr << sout.str(); //ilog( "\r{m} ", ("m",out.str()) ); } } } @@ -3850,7 +3850,7 @@ int main( int argc, char** argv ) { std::optional chain_id; if( str_chain_id.size() == 0 ) { - ilog( "grabbing chain_id from ${n}", ("n", node_executable_name) ); + ilog( "grabbing chain_id from {n}", ("n", node_executable_name) ); auto info = get_info(); chain_id = info.chain_id; } else { @@ -4475,7 +4475,7 @@ int main( int argc, char** argv ) { if (!print_recognized_errors(e, verbose)) { // Error is not recognized if (!print_help_text(e) || verbose) { - elog("Failed with error: ${e}", ("e", verbose ? e.to_detail_string() : e.to_string())); + elog("Failed with error: {e}", ("e", verbose ? 
e.to_detail_string() : e.to_string())); } } return 1; @@ -4489,7 +4489,7 @@ int main( int argc, char** argv ) { return 1; } catch (connection_exception& e) { if (verbose) { - elog("connect error: ${e}", ("e", e.to_detail_string())); + elog("connect error: {e}", ("e", e.to_detail_string())); } return 1; } catch ( const std::bad_alloc& ) { diff --git a/programs/cleos_tpm/main.cpp b/programs/cleos_tpm/main.cpp index c8b19f752dd..dbf2fb4c429 100644 --- a/programs/cleos_tpm/main.cpp +++ b/programs/cleos_tpm/main.cpp @@ -2716,7 +2716,7 @@ int main( int argc, char** argv ) { std::optional chain_id; if( str_chain_id.size() == 0 ) { - ilog( "grabbing chain_id from ${n}", ("n", node_executable_name) ); + ilog( "grabbing chain_id from {n}", ("n", node_executable_name) ); auto info = get_info(); chain_id = info.chain_id; } else { @@ -3119,9 +3119,9 @@ int main( int argc, char** argv ) { out << args ;//<< "\n"; if( trace["block_num"].as_uint64() <= lib ) { - dlog( "\r${m}", ("m",out.str()) ); + dlog( "\r{m}", ("m",out.str()) ); } else { - wlog( "\r${m}", ("m",out.str()) ); + wlog( "\r{m}", ("m",out.str()) ); } if( printconsole ) { auto console = at["console"].as_string(); @@ -3134,7 +3134,7 @@ int main( int argc, char** argv ) { if( !fullact ) break; line.clear(); } - cerr << sout.str(); //ilog( "\r${m} ", ("m",out.str()) ); + cerr << sout.str(); //ilog( "\r{m} ", ("m",out.str()) ); } } } @@ -3785,13 +3785,13 @@ int main( int argc, char** argv ) { signed_transaction trx; try { abi_serializer::from_variant( trx_var, trx, abi_serializer_resolver_empty, abi_serializer::create_yield_function( abi_serializer_max_time ) ); - } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Invalid transaction format: '${data}'", + } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Invalid transaction format: '{data}'", ("data", fc::json::to_string(trx_var, fc::time_point::maximum()))) std::optional chain_id; if( str_chain_id.size() == 0 ) { - ilog( "grabbing chain_id from ${n}", ("n", 
node_executable_name) ); + ilog( "grabbing chain_id from {n}", ("n", node_executable_name) ); auto info = get_info(); chain_id = info.chain_id; } else { @@ -4408,7 +4408,7 @@ int main( int argc, char** argv ) { if (!print_recognized_errors(e, verbose)) { // Error is not recognized if (!print_help_text(e) || verbose) { - elog("Failed with error: ${e}", ("e", verbose ? e.to_detail_string() : e.to_string())); + elog("Failed with error: {e}", ("e", verbose ? e.to_detail_string() : e.to_string())); } } return 1; @@ -4422,7 +4422,7 @@ int main( int argc, char** argv ) { return 1; } catch (connection_exception& e) { if (verbose) { - elog("connect error: ${e}", ("e", e.to_detail_string())); + elog("connect error: {e}", ("e", e.to_detail_string())); } return 1; } catch ( const std::bad_alloc& ) { diff --git a/programs/eosio-blocklog/main.cpp b/programs/eosio-blocklog/main.cpp index adc5a984fea..3161f2b1144 100644 --- a/programs/eosio-blocklog/main.cpp +++ b/programs/eosio-blocklog/main.cpp @@ -62,7 +62,7 @@ struct report_time { void report() { const auto duration = std::chrono::duration_cast(std::chrono::high_resolution_clock::now() - _start).count() / 1000; - ilog("eosio-blocklog - ${desc} took ${t} msec", ("desc", _desc)("t", duration)); + ilog("eosio-blocklog - {desc} took {t} msec", ("desc", _desc)("t", duration)); } const std::chrono::high_resolution_clock::time_point _start; @@ -78,7 +78,7 @@ void blocklog::read_log() { EOS_ASSERT( end->block_num() > 1, block_log_exception, "Only one block found in block log" ); //fix message below, first block might not be 1, first_block_num is not set yet - ilog( "existing block log contains block num ${first} through block num ${n}", + ilog( "existing block log contains block num {first} through block num {n}", ("first",block_logger.first_block_num())("n",end->block_num()) ); if (first_block < block_logger.first_block_num()) { first_block = block_logger.first_block_num(); @@ -100,7 +100,7 @@ void blocklog::read_log() { } else { 
auto first = fork_db_branch.rbegin(); auto last = fork_db_branch.rend() - 1; - ilog( "existing reversible fork_db block num ${first} through block num ${last} ", + ilog( "existing reversible fork_db block num {first} through block num {last} ", ("first", (*first)->block_num)( "last", (*last)->block_num ) ); EOS_ASSERT( end->block_num() + 1 == (*first)->block_num, block_log_exception, "fork_db does not start at end of block log" ); @@ -354,13 +354,13 @@ int main(int argc, char** argv) { blog.initialize(vmap); blog.read_log(); } catch( const fc::exception& e ) { - elog( "${e}", ("e", e.to_detail_string())); + elog( "{e}", ("e", e.to_detail_string())); return -1; } catch( const boost::exception& e ) { - elog("${e}", ("e",boost::diagnostic_information(e))); + elog("{e}", ("e",boost::diagnostic_information(e))); return -1; } catch( const std::exception& e ) { - elog("${e}", ("e",e.what())); + elog("{e}", ("e",e.what())); return -1; } catch( ... ) { elog("unknown exception"); diff --git a/programs/eosio-tester/main.cpp b/programs/eosio-tester/main.cpp index 4638f53bc2b..31e16bbf4e3 100644 --- a/programs/eosio-tester/main.cpp +++ b/programs/eosio-tester/main.cpp @@ -285,7 +285,7 @@ struct test_chain { void finish_block() { start_if_needed(); - ilog("finish block ${n}", ("n", control->head_block_num())); + ilog("finish block {n}", ("n", control->head_block_num())); control->finalize_block([&](eosio::chain::digest_type d) { return std::vector{ producer_key.sign(d) }; }); control->commit_block(); } @@ -705,8 +705,8 @@ struct callbacks { auto start_time = std::chrono::steady_clock::now(); auto result = chain.control->push_transaction(fut.get(), fc::time_point::maximum(), 2000, true, 0); auto us = std::chrono::duration_cast(std::chrono::steady_clock::now() - start_time); - ilog("chainlib transaction took ${u} us", ("u", us.count())); - // ilog("${r}", ("r", fc::json::to_pretty_string(result))); + ilog("chainlib transaction took {u} us", ("u", us.count())); + // ilog("{r}", 
("r", fc::json::to_pretty_string(result))); set_data(cb_alloc_data, cb_alloc, convert_to_bin(chain_types::transaction_trace{ eosio::state_history::convert(*result) })); } @@ -833,14 +833,14 @@ struct callbacks { auto start_time = std::chrono::steady_clock::now(); auto result = r.query_handler->query_transaction(*r.write_snapshot, data.data(), data.size()); auto us = std::chrono::duration_cast(std::chrono::steady_clock::now() - start_time); - ilog("rodeos transaction took ${u} us", ("u", us.count())); + ilog("rodeos transaction took {u} us", ("u", us.count())); auto tt = eosio::convert_from_bin( { result.data, result.data + result.size }); auto& tt0 = std::get(tt); for (auto& at : tt0.action_traces) { auto& at1 = std::get(at); if (!at1.console.empty()) - ilog("rodeos query console: <<<\n${c}>>>", ("c", at1.console)); + ilog("rodeos query console: <<<\n{c}>>>", ("c", at1.console)); } set_data(cb_alloc_data, cb_alloc, result); } diff --git a/programs/eosio-tpmattestcheck/main.cpp b/programs/eosio-tpmattestcheck/main.cpp index 5a89f7a0ba1..b572277dddf 100644 --- a/programs/eosio-tpmattestcheck/main.cpp +++ b/programs/eosio-tpmattestcheck/main.cpp @@ -35,7 +35,7 @@ int main(int argc, char** argv) { bpo::notify(varmap); } catch(fc::exception& e) { - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); return 1; } diff --git a/programs/eosio-tpmtool/main.cpp b/programs/eosio-tpmtool/main.cpp index 0169fe2db4c..1bd7e58c10a 100644 --- a/programs/eosio-tpmtool/main.cpp +++ b/programs/eosio-tpmtool/main.cpp @@ -42,7 +42,7 @@ int main(int argc, char** argv) { bpo::notify(varmap); } catch(fc::exception& e) { - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); return 1; } diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index 563b4aaf1ed..b319d799153 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -25,11 +25,11 @@ void configure_logging(const bfs::path& config_path) { 
throw; } } catch (const fc::exception& e) { // - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); } catch (const boost::exception& e) { - elog("${e}", ("e", boost::diagnostic_information(e))); + elog("{e}", ("e", boost::diagnostic_information(e))); } catch (const std::exception& e) { // - elog("${e}", ("e", e.what())); + elog("{e}", ("e", e.what())); } catch (...) { // empty } @@ -38,9 +38,9 @@ void configure_logging(const bfs::path& config_path) { void logging_conf_handler() { auto config_path = app().get_logging_conf(); if (fc::exists(config_path)) { - ilog("Received HUP. Reloading logging configuration from ${p}.", ("p", config_path.string())); + ilog("Received HUP. Reloading logging configuration from {p}.", ("p", config_path.string())); } else { - ilog("Received HUP. No log config found at ${p}, setting to default.", ("p", config_path.string())); + ilog("Received HUP. No log config found at {p}, setting to default.", ("p", config_path.string())); } configure_logging(config_path); fc::log_config::initialize_appenders(app().get_io_service()); @@ -98,11 +98,11 @@ int main(int argc, char** argv) app().startup(); app().exec(); } catch (const fc::exception& e) { - elog("${e}", ("e",e.to_detail_string())); + elog("{e}", ("e",e.to_detail_string())); } catch (const boost::exception& e) { - elog("${e}", ("e",boost::diagnostic_information(e))); + elog("{e}", ("e",boost::diagnostic_information(e))); } catch (const std::exception& e) { - elog("${e}", ("e",e.what())); + elog("{e}", ("e",e.what())); } catch (...) 
{ elog("unknown exception"); } diff --git a/programs/nodeos-sectl/main.cpp b/programs/nodeos-sectl/main.cpp index ac261503dcd..cc5d51cee08 100644 --- a/programs/nodeos-sectl/main.cpp +++ b/programs/nodeos-sectl/main.cpp @@ -46,11 +46,11 @@ int main(int argc, char** argv) { return BAD_ALLOC; } catch(const fc::exception& e) { - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); return 1; } catch(const std::exception& e) { - elog("${e}", ("e", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); + elog("{e}", ("e", fc::std_exception_wrapper::from_current_exception(e).to_detail_string())); return 1; } diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 65ea5066bb4..39afc56ed88 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -54,11 +54,11 @@ void configure_logging(const bfs::path& config_path) throw; } } catch (const fc::exception& e) { - elog("${e}", ("e",e.to_detail_string())); + elog("{e}", ("e",e.to_detail_string())); } catch (const boost::exception& e) { - elog("${e}", ("e",boost::diagnostic_information(e))); + elog("{e}", ("e",boost::diagnostic_information(e))); } catch (const std::exception& e) { - elog("${e}", ("e",e.what())); + elog("{e}", ("e",e.what())); } catch (...) { // empty } @@ -70,9 +70,9 @@ void logging_conf_handler() { auto config_path = app().get_logging_conf(); if( fc::exists( config_path ) ) { - ilog( "Received HUP. Reloading logging configuration from ${p}.", ("p", config_path.string()) ); + ilog( "Received HUP. Reloading logging configuration from {p}.", ("p", config_path.string()) ); } else { - ilog( "Received HUP. No log config found at ${p}, setting to default.", ("p", config_path.string()) ); + ilog( "Received HUP. 
No log config found at {p}, setting to default.", ("p", config_path.string()) ); } ::detail::configure_logging( config_path ); fc::log_config::initialize_appenders( app().get_io_service() ); @@ -133,11 +133,11 @@ int main(int argc, char** argv) return INITIALIZE_FAIL; } initialize_logging(); - ilog( "${name} version ${ver} ${fv}", + ilog( "{name} version {ver} {fv}", ("name", nodeos::config::node_executable_name)("ver", app().version_string()) ("fv", app().version_string() == app().full_version_string() ? "" : app().full_version_string()) ); - ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app().full_config_file_path().string())); - ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app().data_dir().string())); + ilog("{name} using configuration file {c}", ("name", nodeos::config::node_executable_name)("c", app().full_config_file_path().string())); + ilog("{name} data directory is {d}", ("name", nodeos::config::node_executable_name)("d", app().data_dir().string())); app().startup(); app().set_thread_priority_max(); app().exec(); @@ -157,25 +157,25 @@ int main(int argc, char** argv) } } catch (...) { } - elog( "${e}", ("e",e.to_detail_string())); + elog( "{e}", ("e",e.to_detail_string())); return OTHER_FAIL; } catch( const fc::exception& e ) { - elog( "${e}", ("e", e.to_detail_string())); + elog( "{e}", ("e", e.to_detail_string())); return OTHER_FAIL; } catch( const boost::interprocess::bad_alloc& e ) { elog("bad alloc"); return BAD_ALLOC; } catch( const boost::exception& e ) { - elog("${e}", ("e",boost::diagnostic_information(e))); + elog("{e}", ("e",boost::diagnostic_information(e))); return OTHER_FAIL; } catch( const std::exception& e ) { - elog("${e}", ("e",e.what())); + elog("{e}", ("e",e.what())); return OTHER_FAIL; } catch( ... 
) { elog("unknown exception"); return OTHER_FAIL; } - ilog("${name} successfully exiting", ("name", nodeos::config::node_executable_name)); + ilog("{name} successfully exiting", ("name", nodeos::config::node_executable_name)); return SUCCESS; } diff --git a/programs/rodeos/cloner_plugin.cpp b/programs/rodeos/cloner_plugin.cpp index e4365e99904..3150e8e7ca4 100644 --- a/programs/rodeos/cloner_plugin.cpp +++ b/programs/rodeos/cloner_plugin.cpp @@ -107,12 +107,12 @@ struct cloner_session : ship_client::connection_callbacks, std::enable_shared_fr rodeos_snapshot->force_write_stride = config->force_write_stride; ilog("cloner database status:"); - ilog(" revisions: ${f} - ${r}", + ilog(" revisions: {f} - {r}", ("f", rodeos_snapshot->undo_stack->first_revision())("r", rodeos_snapshot->undo_stack->revision())); - ilog(" chain: ${a}", ("a", eosio::convert_to_json(rodeos_snapshot->chain_id))); - ilog(" head: ${a} ${b}", + ilog(" chain: {a}", ("a", eosio::convert_to_json(rodeos_snapshot->chain_id))); + ilog(" head: {a} {b}", ("a", rodeos_snapshot->head)("b", eosio::convert_to_json(rodeos_snapshot->head_id))); - ilog(" irreversible: ${a} ${b}", + ilog(" irreversible: {a} {b}", ("a", rodeos_snapshot->irreversible)("b", eosio::convert_to_json(rodeos_snapshot->irreversible_id))); rodeos_snapshot->end_write(true); @@ -128,7 +128,7 @@ struct cloner_session : ship_client::connection_callbacks, std::enable_shared_fr } bool received(get_status_result_v0& status, eosio::input_stream bin) override { - ilog("nodeos has chain ${c}", ("c", eosio::convert_to_json(status.chain_id))); + ilog("nodeos has chain {c}", ("c", eosio::convert_to_json(status.chain_id))); if (rodeos_snapshot->chain_id == eosio::checksum256{}) rodeos_snapshot->chain_id = status.chain_id; if (rodeos_snapshot->chain_id != status.chain_id) @@ -167,7 +167,7 @@ struct cloner_session : ship_client::connection_callbacks, std::enable_shared_fr if (!result.this_block) return true; if (config->stop_before && 
result.this_block->block_num >= config->stop_before) { - ilog("block ${b}: stop requested", ("b", result.this_block->block_num)); + ilog("block {b}: stop requested", ("b", result.this_block->block_num)); rodeos_snapshot->end_write(true); db->flush(false, false); return false; @@ -189,7 +189,7 @@ struct cloner_session : ship_client::connection_callbacks, std::enable_shared_fr bool near = result.this_block->block_num + 4 >= result.last_irreversible.block_num; bool write_now = !(result.this_block->block_num % 200) || near; if (write_now || !reported_block) - ilog("block ${b} ${i}", + ilog("block {b} {i}", ("b", result.this_block->block_num)( "i", result.this_block->block_num <= result.last_irreversible.block_num ? "irreversible" : "")); reported_block = true; diff --git a/programs/rodeos/main.cpp b/programs/rodeos/main.cpp index 985df5b34fe..e2859c56f50 100644 --- a/programs/rodeos/main.cpp +++ b/programs/rodeos/main.cpp @@ -25,11 +25,11 @@ void configure_logging(const bfs::path& config_path) { throw; } } catch (const fc::exception& e) { // - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); } catch (const boost::exception& e) { - elog("${e}", ("e", boost::diagnostic_information(e))); + elog("{e}", ("e", boost::diagnostic_information(e))); } catch (const std::exception& e) { // - elog("${e}", ("e", e.what())); + elog("{e}", ("e", e.what())); } catch (...) { // empty } @@ -40,9 +40,9 @@ void configure_logging(const bfs::path& config_path) { void logging_conf_handler() { auto config_path = app().get_logging_conf(); if (fc::exists(config_path)) { - ilog("Received HUP. Reloading logging configuration from ${p}.", ("p", config_path.string())); + ilog("Received HUP. Reloading logging configuration from {p}.", ("p", config_path.string())); } else { - ilog("Received HUP. No log config found at ${p}, setting to default.", ("p", config_path.string())); + ilog("Received HUP. 
No log config found at {p}, setting to default.", ("p", config_path.string())); } ::detail::configure_logging(config_path); fc::log_config::initialize_appenders(app().get_io_service()); @@ -82,36 +82,36 @@ int main(int argc, char** argv) { return initialize_fail; } initialize_logging(); - ilog("${name} version ${ver} ${fv}", + ilog("{name} version {ver} {fv}", ("name", b1::rodeos::config::rodeos_executable_name)("ver", app().version_string())( "fv", app().version_string() == app().full_version_string() ? "" : app().full_version_string())); - ilog("${name} using configuration file ${c}", + ilog("{name} using configuration file {c}", ("name", b1::rodeos::config::rodeos_executable_name)("c", app().full_config_file_path().string())); - ilog("${name} data directory is ${d}", + ilog("{name} data directory is {d}", ("name", b1::rodeos::config::rodeos_executable_name)("d", app().data_dir().string())); app().startup(); app().set_thread_priority_max(); app().exec(); } catch (const fc::std_exception_wrapper& e) { - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); return other_fail; } catch (const fc::exception& e) { - elog("${e}", ("e", e.to_detail_string())); + elog("{e}", ("e", e.to_detail_string())); return other_fail; } catch (const boost::interprocess::bad_alloc& e) { elog("bad alloc"); return bad_alloc; } catch (const boost::exception& e) { - elog("${e}", ("e", boost::diagnostic_information(e))); + elog("{e}", ("e", boost::diagnostic_information(e))); return other_fail; } catch (const std::exception& e) { - elog("${e}", ("e", e.what())); + elog("{e}", ("e", e.what())); return other_fail; } catch (...) 
{ elog("unknown exception"); return other_fail; } - ilog("${name} successfully exiting", ("name", b1::rodeos::config::rodeos_executable_name)); + ilog("{name} successfully exiting", ("name", b1::rodeos::config::rodeos_executable_name)); return success; } diff --git a/programs/rodeos/rocksdb_plugin.cpp b/programs/rodeos/rocksdb_plugin.cpp index 5683996fb30..841f51f756f 100644 --- a/programs/rodeos/rocksdb_plugin.cpp +++ b/programs/rodeos/rocksdb_plugin.cpp @@ -59,7 +59,7 @@ void rocksdb_plugin::plugin_shutdown() {} std::shared_ptr rocksdb_plugin::get_db() { std::lock_guard lock(my->mutex); if (!my->database) { - ilog("rodeos database is ${d}", ("d", my->db_path.string())); + ilog("rodeos database is {d}", ("d", my->db_path.string())); if (!bfs::exists(my->db_path.parent_path())) bfs::create_directories(my->db_path.parent_path()); diff --git a/programs/rodeos/ship_client.hpp b/programs/rodeos/ship_client.hpp index 1ac40f3fb5b..60f25fc4d8c 100644 --- a/programs/rodeos/ship_client.hpp +++ b/programs/rodeos/ship_client.hpp @@ -187,10 +187,10 @@ struct connection : connection_base { try { f(); } catch (const eosio::chain::unsupported_feature& e) { - elog("${e}", ("e", e.what())); + elog("{e}", ("e", e.what())); close(false, true /* quitting */); } catch (const std::exception& e) { - elog("${e}", ("e", e.what())); + elog("{e}", ("e", e.what())); close(false, false); } catch (...) { elog("unknown exception"); @@ -207,13 +207,13 @@ struct connection : connection_base { void on_fail(error_code ec, const char* what) { try { - elog("${w}: ${m}", ("w", what)("m", ec.message())); + elog("{w}: {m}", ("w", what)("m", ec.message())); close(true, false); } catch (...) 
{ elog("exception while closing"); } } void close(bool retry, bool quitting) { - ilog("closing state-history socket, retry: ${r}, quitting: ${q}", ("r", retry) ("q", quitting)); + ilog("closing state-history socket, retry: {r}, quitting: {q}", ("r", retry) ("q", quitting)); derived_connection().stream.next_layer().close(); if (callbacks) callbacks->closed(retry, quitting); @@ -226,7 +226,7 @@ struct tcp_connection : connection, std::enable_shared_from_this connection(callbacks), config(config), resolver(ioc), stream(ioc) {} void connect() { - ilog("connect to ${h}:${p}", ("h", config.host)("p", config.port)); + ilog("connect to {h}:{p}", ("h", config.host)("p", config.port)); resolver.async_resolve( // config.host, config.port, [self = shared_from_this(), this](error_code ec, tcp::resolver::results_type results) { @@ -252,7 +252,7 @@ struct unix_connection : connection, std::enable_shared_from_th connection(callbacks), config(config), stream(ioc) {} void connect() { - ilog("connect to unix path ${p}", ("p", config.path)); + ilog("connect to unix path {p}", ("p", config.path)); stream.next_layer().async_connect(config.path, [self = shared_from_this(), this](error_code ec) { enter_callback(ec, "connect", [&] { ws_handshake(""); diff --git a/programs/rodeos/streamer_plugin.cpp b/programs/rodeos/streamer_plugin.cpp index 83d90d2ff8b..67cbb5241a8 100644 --- a/programs/rodeos/streamer_plugin.cpp +++ b/programs/rodeos/streamer_plugin.cpp @@ -139,7 +139,7 @@ void streamer_plugin::plugin_initialize(const variables_map& options) { initialize_rabbits_exchange(my->streams, rabbits_exchanges, my->publish_immediately, stream_data_path); } - ilog("initialized streams: ${streams}", ("streams", my->streams.size())); + ilog("initialized streams: {streams}", ("streams", my->streams.size())); } FC_LOG_AND_RETHROW() } diff --git a/programs/rodeos/streams/logger.hpp b/programs/rodeos/streams/logger.hpp index 7226c33e3f3..462a46b7668 100644 --- a/programs/rodeos/streams/logger.hpp +++ 
b/programs/rodeos/streams/logger.hpp @@ -13,7 +13,7 @@ class logger : public stream_handler { } void publish(const std::vector& data, const std::string& routing_key) override { - ilog("logger stream ${r}: [${data_size}] >> ${data}", + ilog("logger stream {r}: [{data_size}] >> {data}", ("r", routing_key)("data", std::string(data.begin(), data.end()))("data_size", data.size())); } }; diff --git a/programs/rodeos/streams/rabbitmq.hpp b/programs/rodeos/streams/rabbitmq.hpp index 7bcc3c8f927..296ed6f40b8 100644 --- a/programs/rodeos/streams/rabbitmq.hpp +++ b/programs/rodeos/streams/rabbitmq.hpp @@ -29,7 +29,7 @@ class rabbitmq : public stream_handler { fc::seconds( 60 ), true, []( const std::string& err ) { - elog( "AMQP fatal error: ${e}", ("e", err) ); + elog( "AMQP fatal error: {e}", ("e", err) ); appbase::app().quit(); } ); } @@ -41,7 +41,7 @@ class rabbitmq : public stream_handler { , publish_immediately_(publish_immediately) , queue_name_( std::move( queue_name)) { - ilog("Connecting to RabbitMQ address ${a} - Queue: ${q}...", ("a", address)( "q", queue_name_)); + ilog("Connecting to RabbitMQ address {a} - Queue: {q}...", ("a", address)( "q", queue_name_)); init(); } @@ -52,7 +52,7 @@ class rabbitmq : public stream_handler { , publish_immediately_(publish_immediately) , exchange_name_( std::move( exchange_name)) { - ilog("Connecting to RabbitMQ address ${a} - Exchange: ${e}...", ("a", address)( "e", exchange_name_)); + ilog("Connecting to RabbitMQ address {a} - Exchange: {e}...", ("a", address)( "e", exchange_name_)); init(); } @@ -71,7 +71,7 @@ class rabbitmq : public stream_handler { if( publish_immediately_ ) { amqp_publisher_->publish_message_direct( exchange_name_.empty() ? queue_name_ : routing_key, data, []( const std::string& err ) { - elog( "AMQP direct message error: ${e}", ("e", err) ); + elog( "AMQP direct message error: {e}", ("e", err) ); } ); } else { queue_.emplace_back( std::make_pair( exchange_name_.empty() ? 
queue_name_ : routing_key, data ) ); diff --git a/programs/rodeos/streams/stream.hpp b/programs/rodeos/streams/stream.hpp index b41f0de527c..39cb6c1d253 100644 --- a/programs/rodeos/streams/stream.hpp +++ b/programs/rodeos/streams/stream.hpp @@ -47,7 +47,7 @@ inline std::vector extract_routes(const std::string& routes_str) { size_t pos = routings.find(","); size_t route_length = pos == std::string::npos ? routings.length() : pos; std::string route = routings.substr(0, pos); - ilog("extracting route ${route}", ("route", route)); + ilog("extracting route {route}", ("route", route)); if (route != "*") { streaming_routes.emplace_back(std::move(route)); } else { diff --git a/programs/rodeos/wasm_ql_http.cpp b/programs/rodeos/wasm_ql_http.cpp index 5a148459e8b..782821d5ef7 100644 --- a/programs/rodeos/wasm_ql_http.cpp +++ b/programs/rodeos/wasm_ql_http.cpp @@ -90,7 +90,7 @@ EOSIO_REFLECT(send_error_results, code, message, error) namespace b1::rodeos::wasm_ql { // Report a failure -static void fail(beast::error_code ec, const char* what) { elog("${w}: ${s}", ("w", what)("s", ec.message())); } +static void fail(beast::error_code ec, const char* what) { elog("{w}: {s}", ("w", what)("s", ec.message())); } // Return a reasonable mime type based on the extension of a file. 
beast::string_view mime_type(beast::string_view path) { @@ -299,7 +299,7 @@ void handle_request(const wasm_ql::http_config& http_config, const wasm_ql::shar send(ok(std::move(json_result), "application/json")); } else { try { - // elog("query failed: ${s}", ("s", e.what())); + // elog("query failed: {s}", ("s", e.what())); send_error_results err; err.code = (uint16_t)http::status::internal_server_error; err.message = "Internal Service Error"; @@ -377,7 +377,7 @@ void handle_request(const wasm_ql::http_config& http_config, const wasm_ql::shar } } catch (const eosio::vm::exception& e) { try { - // elog("query failed: ${s}", ("s", e.what())); + // elog("query failed: {s}", ("s", e.what())); error_results err; err.code = (uint16_t)http::status::internal_server_error; err.message = "Internal Service Error"; @@ -389,7 +389,7 @@ void handle_request(const wasm_ql::http_config& http_config, const wasm_ql::shar } } catch (const std::exception& e) { try { - // elog("query failed: ${s}", ("s", e.what())); + // elog("query failed: {s}", ("s", e.what())); error_results err; err.code = (uint16_t)http::status::internal_server_error; err.message = "Internal Service Error"; @@ -668,7 +668,7 @@ class listener : public std::enable_shared_from_this { auto check_ec = [&](const char* what) { if (!ec) return; - elog("${w}: ${m}", ("w", what)("m", ec.message())); + elog("{w}: {m}", ("w", what)("m", ec.message())); FC_ASSERT(false, "unable to open listen socket"); }; @@ -708,14 +708,14 @@ class listener : public std::enable_shared_from_this { // Create the http session and run it if constexpr (std::is_same_v) { boost::system::error_code ec; - dlog( "Accepting connection from ${ra}:${rp} to ${la}:${lp}", + dlog( "Accepting connection from {ra}:{rp} to {la}:{lp}", ("ra", socket.remote_endpoint(ec).address().to_string())("rp", socket.remote_endpoint(ec).port()) ("la", socket.local_endpoint(ec).address().to_string())("lp", socket.local_endpoint(ec).port()) ); std::make_shared( http_config, 
shared_state, state_cache, std::move( socket ) )->run(); } else if constexpr (std::is_same_v) { boost::system::error_code ec; auto rep = socket.remote_endpoint(ec); - dlog( "Accepting connection from ${r}", ("r", rep.path()) ); + dlog( "Accepting connection from {r}", ("r", rep.path()) ); std::make_shared( http_config, shared_state, state_cache, std::move( socket ) )->run(); } } diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index cd4fd4bb8ac..d00d9cda781 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1674,7 +1674,7 @@ BOOST_AUTO_TEST_CASE_TEMPLATE(more_deferred_transaction_tests, TYPE_T, backing_s auto print_deferred = [&index]() { for( const auto& gto : index ) { - wlog("id = ${id}, trx_id = ${trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); + wlog("id = {id}, trx_id = {trx_id}", ("id", gto.id)("trx_id", gto.trx_id)); } }; diff --git a/unittests/db_to_kv_tests.cpp b/unittests/db_to_kv_tests.cpp index 1b9596e9809..591877f106a 100644 --- a/unittests/db_to_kv_tests.cpp +++ b/unittests/db_to_kv_tests.cpp @@ -146,18 +146,18 @@ std::pair compare_composite_keys(const CharCont& lhs_cont, co const auto rhs_size = rhs_cont.size(); for (; j < lhs_size; ++j) { if (j >= rhs_size) { - if (print) ilog("${j}: lhs longer than rhs", ("j", j)); + if (print) ilog("{j}: lhs longer than rhs", ("j", j)); return { gt, j }; } const auto left = uint64_t(static_cast(lhs[j])); const auto right = uint64_t(static_cast(rhs[j])); - if (print) ilog("${j}: ${l} ${sym} ${r}", ("j", j)("l", left)("sym",(left == right ? "==" : "!="))("r", right)); + if (print) ilog("{j}: {l} {sym} {r}", ("j", j)("l", left)("sym",(left == right ? "==" : "!="))("r", right)); if (left != right) { return { left < right ? 
lt : gt, j }; } } if (rhs_size > lhs_size) { - if (print) ilog("rhs longer (${r}) than lhs (${l})", ("r", rhs_size)("l", lhs_size)); + if (print) ilog("rhs longer ({r}) than lhs ({l})", ("r", rhs_size)("l", lhs_size)); return { lt, j }; } @@ -166,7 +166,7 @@ std::pair compare_composite_keys(const CharCont& lhs_cont, co template std::pair compare_composite_keys(const CharKey& keys, uint64_t lhs_index, bool print = false) { - if (print) ilog("verifying [${i1}] and [${i2}]",("i1", lhs_index)("i2", lhs_index + 1)); + if (print) ilog("verifying [{i1}] and [{i2}]",("i1", lhs_index)("i2", lhs_index + 1)); return compare_composite_keys(keys[lhs_index], keys[lhs_index + 1], print); } diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index c16eee75291..d5de3a3075f 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1415,7 +1415,7 @@ BOOST_AUTO_TEST_CASE(public_key_from_hash) { std::memcpy(&data.data[1], hash.data(), hash.data_size() ); fc::ecc::public_key_shim shim(data); fc::crypto::public_key eos_unknown_pk(std::move(shim)); - ilog( "public key with no known private key: ${k}", ("k", eos_unknown_pk.to_string()) ); + ilog( "public key with no known private key: {k}", ("k", eos_unknown_pk.to_string()) ); } BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index b5383ca3c4b..d89bc7b76ec 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -482,10 +482,10 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { if( act.account == "eosio"_n && act.name == "onblock"_n ) return; if( t->receipt && t->receipt->status == transaction_receipt::executed ) { - wlog( "${trx_type} ${id} executed (first action is ${code}::${action})", + wlog( "{trx_type} {id} executed (first action is {code}::{action})", ("trx_type", t->scheduled ? 
"scheduled trx" : "trx")("id", t->id)("code", act.account.to_string())("action", act.name.to_string()) ); } else { - wlog( "${trx_type} ${id} failed (first action is ${code}::${action})", + wlog( "{trx_type} {id} failed (first action is {code}::{action})", ("trx_type", t->scheduled ? "scheduled trx" : "trx")("id", t->id)("code", act.account.to_string())("action", act.name.to_string()) ); } }; From 383a0d128d03fb1570fcfd1f86333ca8d03c843d Mon Sep 17 00:00:00 2001 From: Jingjun Zhao Date: Tue, 22 Mar 2022 19:17:09 -0400 Subject: [PATCH 29/31] Test new class to_string_visitor used for converting reflected types into string --- libraries/fc | 2 +- plugins/producer_plugin/producer_plugin.cpp | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/libraries/fc b/libraries/fc index 5c53bed83fa..553be362450 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 5c53bed83fa8f8a7e3b88175ff1eabfd4abd19a2 +Subproject commit 553be3624505bbebb24771a743699133a9a42e6e diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index f617bebdf19..09ec80ebb26 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -28,7 +29,6 @@ #include #include -#include namespace bmi = boost::multi_index; using bmi::indexed_by; @@ -468,12 +468,20 @@ class producer_plugin_impl : public std::enable_shared_from_thisset_level(spdlog::level::debug); + _trx_log.get_agent_logger()->set_level(spdlog::level::debug); // fc_dlog(_trx_log, "[TRX_TRACE] tx: ${trx}", // ("trx", chain_plug->to_trimmed_trx_string(trx->packed_trx()->get_transaction(), chain))); - _trx_trace_success_log.get_agent_logger()->set_level(spdlog::level::debug); - fc_dlog(_trx_trace_success_log, "[TRX_TRACE] tx: {entire_trace}", - ("entire_trace", get_trace(response))); + + string s; + to_string_visitor 
v(const_cast(trx->packed_trx()->get_transaction()), s); + fc::reflector::visit(v); + fc_dlog(_trx_log, "[TRX_TRACE - new tool] tx: {trx}", + ("trx", s)); + +// _trx_trace_success_log.get_agent_logger()->set_level(spdlog::level::debug); +// fc_dlog(_trx_trace_success_log, "[TRX_TRACE] tx: {entire_trace}", +// ("entire_trace", get_trace(response))); + } else { fc_dlog(_trx_successful_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: {txid}, auth: {a}", From bcd505525111d7fbd5bb6a5c92e09ad3980564f9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 22 Mar 2022 22:38:31 -0500 Subject: [PATCH 30/31] Add a to_string.hpp which provides a way to get a loggable string for fc_reflected types --- .../chain/include/eosio/chain/to_string.hpp | 244 ++++++++++++++++++ .../chain/include/eosio/chain/transaction.hpp | 11 +- libraries/fc | 2 +- 3 files changed, 248 insertions(+), 9 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/to_string.hpp diff --git a/libraries/chain/include/eosio/chain/to_string.hpp b/libraries/chain/include/eosio/chain/to_string.hpp new file mode 100644 index 00000000000..650886b09f6 --- /dev/null +++ b/libraries/chain/include/eosio/chain/to_string.hpp @@ -0,0 +1,244 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include + +namespace eosio::chain { + +template +struct has_fmt_to_string : std::false_type { +}; + +template +struct has_fmt_to_string() ) )>> : std::true_type { +}; + +template +struct member_pointer_value { + typedef T type; +}; + +template +struct member_pointer_value { + typedef Value type; +}; + +template struct is_template : std::false_type {}; + +template