diff --git a/Builds/CMake/RippledCore.cmake b/Builds/CMake/RippledCore.cmake index 35b147ad491..f8477b68471 100644 --- a/Builds/CMake/RippledCore.cmake +++ b/Builds/CMake/RippledCore.cmake @@ -82,6 +82,7 @@ target_sources (xrpl_core PRIVATE src/ripple/protocol/impl/PublicKey.cpp src/ripple/protocol/impl/Quality.cpp src/ripple/protocol/impl/Rate2.cpp + src/ripple/protocol/impl/Rules.cpp src/ripple/protocol/impl/SField.cpp src/ripple/protocol/impl/SOTemplate.cpp src/ripple/protocol/impl/STAccount.cpp @@ -209,6 +210,7 @@ install ( src/ripple/protocol/PublicKey.h src/ripple/protocol/Quality.h src/ripple/protocol/Rate.h + src/ripple/protocol/Rules.h src/ripple/protocol/SField.h src/ripple/protocol/SOTemplate.h src/ripple/protocol/STAccount.h @@ -400,13 +402,18 @@ target_sources (rippled PRIVATE src/ripple/app/paths/impl/DirectStep.cpp src/ripple/app/paths/impl/PaySteps.cpp src/ripple/app/paths/impl/XRPEndpointStep.cpp - src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp - src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp - src/ripple/app/rdb/impl/RelationalDBInterface.cpp - src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp - src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp - src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp - src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp + src/ripple/app/rdb/backend/detail/impl/Node.cpp + src/ripple/app/rdb/backend/detail/impl/Shard.cpp + src/ripple/app/rdb/backend/impl/PostgresDatabase.cpp + src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp + src/ripple/app/rdb/impl/Download.cpp + src/ripple/app/rdb/impl/PeerFinder.cpp + src/ripple/app/rdb/impl/RelationalDatabase.cpp + src/ripple/app/rdb/impl/ShardArchive.cpp + src/ripple/app/rdb/impl/State.cpp + src/ripple/app/rdb/impl/UnitaryShard.cpp + src/ripple/app/rdb/impl/Vacuum.cpp + src/ripple/app/rdb/impl/Wallet.cpp src/ripple/app/tx/impl/ApplyContext.cpp src/ripple/app/tx/impl/BookTip.cpp src/ripple/app/tx/impl/CancelCheck.cpp diff --git a/Builds/CMake/RippledSanity.cmake b/Builds/CMake/RippledSanity.cmake index 4aaa2e5f8d2..9e7fd113afd 100644 --- a/Builds/CMake/RippledSanity.cmake +++ b/Builds/CMake/RippledSanity.cmake @@ -72,10 +72,8 @@ if ("${CMAKE_CURRENT_SOURCE_DIR}" STREQUAL "${CMAKE_BINARY_DIR}") "directory from ${CMAKE_CURRENT_SOURCE_DIR} and try building in a separate directory.") endif () -if ("${CMAKE_GENERATOR}" MATCHES "Visual Studio" AND - NOT ("${CMAKE_GENERATOR}" MATCHES .*Win64.*)) - message (FATAL_ERROR - "Visual Studio 32-bit build is not supported. 
Use -G\"${CMAKE_GENERATOR} Win64\"") +if (MSVC AND CMAKE_GENERATOR_PLATFORM STREQUAL "Win32") + message (FATAL_ERROR "Visual Studio 32-bit build is not supported.") endif () if (NOT CMAKE_SIZEOF_VOID_P EQUAL 8) diff --git a/Builds/levelization/results/loops.txt b/Builds/levelization/results/loops.txt index d1838c55c16..cb137f497cb 100644 --- a/Builds/levelization/results/loops.txt +++ b/Builds/levelization/results/loops.txt @@ -14,7 +14,7 @@ Loop: ripple.app ripple.overlay ripple.overlay ~= ripple.app Loop: ripple.app ripple.peerfinder - ripple.peerfinder ~= ripple.app + ripple.app > ripple.peerfinder Loop: ripple.app ripple.rpc ripple.rpc > ripple.app diff --git a/cfg/rippled-example.cfg b/cfg/rippled-example.cfg index f87ad44c7ad..83a9353aa4d 100644 --- a/cfg/rippled-example.cfg +++ b/cfg/rippled-example.cfg @@ -1140,17 +1140,10 @@ # The online delete process checks periodically # that rippled is still in sync with the network, # and that the validated ledger is less than -# 'age_threshold_seconds' old. By default, if it -# is not the online delete process aborts and -# tries again later. If 'recovery_wait_seconds' -# is set and rippled is out of sync, but likely to -# recover quickly, then online delete will wait -# this number of seconds for rippled to get back -# into sync before it aborts. -# Set this value if the node is otherwise staying -# in sync, or recovering quickly, but the online -# delete process is unable to finish. -# Default is unset. +# 'age_threshold_seconds' old. If not, then continue +# sleeping for this number of seconds and +# checking until healthy. +# Default is 5. # # Optional keys for Cassandra: # diff --git a/src/ripple/app/consensus/RCLConsensus.cpp b/src/ripple/app/consensus/RCLConsensus.cpp index be8f2af8cae..79d41581ae3 100644 --- a/src/ripple/app/consensus/RCLConsensus.cpp +++ b/src/ripple/app/consensus/RCLConsensus.cpp @@ -632,7 +632,7 @@ RCLConsensus::Adaptor::doAccept( auto const lastVal = ledgerMaster_.getValidatedLedger(); std::optional rules; if (lastVal) - rules.emplace(*lastVal, app_.config().features); + rules = makeRulesGivenLedger(*lastVal, app_.config().features); else rules.emplace(app_.config().features); app_.openLedger().accept( diff --git a/src/ripple/app/ledger/Ledger.cpp b/src/ripple/app/ledger/Ledger.cpp index 66bea568273..71311448505 100644 --- a/src/ripple/app/ledger/Ledger.cpp +++ b/src/ripple/app/ledger/Ledger.cpp @@ -29,8 +29,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -626,7 +626,7 @@ Ledger::setup(Config const& config) try { - rules_ = Rules(*this, config.features); + rules_ = makeRulesGivenLedger(*this, config.features); } catch (SHAMapMissingNode const&) { @@ -930,9 +930,11 @@ saveValidatedLedger( return true; } - auto res = dynamic_cast( - &app.getRelationalDBInterface()) - ->saveValidatedLedger(ledger, current); + auto const db = dynamic_cast(&app.getRelationalDatabase()); + if (!db) + Throw("Failed to get relational database"); + + auto const res = db->saveValidatedLedger(ledger, current); // Clients can now trust the database for // information about this ledger sequence. 
@@ -1053,7 +1055,7 @@ std::tuple, std::uint32_t, uint256> getLatestLedger(Application& app) { const std::optional info = - app.getRelationalDBInterface().getNewestLedgerInfo(); + app.getRelationalDatabase().getNewestLedgerInfo(); if (!info) return {std::shared_ptr(), {}, {}}; return {loadLedgerHelper(*info, app, true), info->seq, info->hash}; @@ -1063,7 +1065,7 @@ std::shared_ptr loadByIndex(std::uint32_t ledgerIndex, Application& app, bool acquire) { if (std::optional info = - app.getRelationalDBInterface().getLedgerInfoByIndex(ledgerIndex)) + app.getRelationalDatabase().getLedgerInfoByIndex(ledgerIndex)) { std::shared_ptr ledger = loadLedgerHelper(*info, app, acquire); finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger")); @@ -1076,7 +1078,7 @@ std::shared_ptr loadByHash(uint256 const& ledgerHash, Application& app, bool acquire) { if (std::optional info = - app.getRelationalDBInterface().getLedgerInfoByHash(ledgerHash)) + app.getRelationalDatabase().getLedgerInfoByHash(ledgerHash)) { std::shared_ptr ledger = loadLedgerHelper(*info, app, acquire); finishLoadByIndexOrHash(ledger, app.config(), app.journal("Ledger")); @@ -1165,9 +1167,12 @@ flatFetchTransactions(ReadView const& ledger, Application& app) return {}; } - auto nodestoreHashes = dynamic_cast( - &app.getRelationalDBInterface()) - ->getTxHashes(ledger.info().seq); + auto const db = + dynamic_cast(&app.getRelationalDatabase()); + if (!db) + Throw("Failed to get relational database"); + + auto nodestoreHashes = db->getTxHashes(ledger.info().seq); return flatFetchTransactions(app, nodestoreHashes); } diff --git a/src/ripple/app/ledger/impl/InboundLedger.cpp b/src/ripple/app/ledger/impl/InboundLedger.cpp index d24c451a12f..3ecba97b199 100644 --- a/src/ripple/app/ledger/impl/InboundLedger.cpp +++ b/src/ripple/app/ledger/impl/InboundLedger.cpp @@ -44,27 +44,27 @@ using namespace std::chrono_literals; enum { // Number of peers to start with - peerCountStart = 4 + peerCountStart = 5 // Number of peers to add on a timeout , - peerCountAdd = 2 + peerCountAdd = 3 // how many timeouts before we give up , - ledgerTimeoutRetriesMax = 10 + ledgerTimeoutRetriesMax = 6 // how many timeouts before we get aggressive , - ledgerBecomeAggressiveThreshold = 6 + ledgerBecomeAggressiveThreshold = 4 // Number of nodes to find initially , - missingNodesFind = 512 + missingNodesFind = 256 // Number of nodes to request for a reply , - reqNodesReply = 256 + reqNodesReply = 128 // Number of nodes to request blindly , @@ -72,7 +72,7 @@ enum { }; // millisecond for each ledger timeout -auto constexpr ledgerAcquireTimeout = 2500ms; +auto constexpr ledgerAcquireTimeout = 3000ms; InboundLedger::InboundLedger( Application& app, @@ -664,15 +664,15 @@ InboundLedger::trigger(std::shared_ptr const& peer, TriggerReason reason) if (reason != TriggerReason::reply) { // If we're querying blind, don't query deep - tmGL.set_querydepth(1); + tmGL.set_querydepth(0); } else if (peer && peer->isHighLatency()) { // If the peer has high latency, query extra deep - tmGL.set_querydepth(3); + tmGL.set_querydepth(2); } else - tmGL.set_querydepth(2); + tmGL.set_querydepth(1); // Get the state data first because it's the most likely to be useful // if we wind up abandoning this fetch. 
diff --git a/src/ripple/app/ledger/impl/LedgerMaster.cpp b/src/ripple/app/ledger/impl/LedgerMaster.cpp index 3bae67f655d..ad08b18dd58 100644 --- a/src/ripple/app/ledger/impl/LedgerMaster.cpp +++ b/src/ripple/app/ledger/impl/LedgerMaster.cpp @@ -34,8 +34,7 @@ #include #include #include -#include -#include +#include #include #include #include @@ -278,10 +277,10 @@ LedgerMaster::getValidatedLedgerAge() #ifdef RIPPLED_REPORTING if (app_.config().reporting()) - return static_cast( - &app_.getRelationalDBInterface()) + return static_cast(&app_.getRelationalDatabase()) ->getValidatedLedgerAge(); #endif + std::chrono::seconds valClose{mValidLedgerSign.load()}; if (valClose == 0s) { @@ -309,8 +308,7 @@ LedgerMaster::isCaughtUp(std::string& reason) #ifdef RIPPLED_REPORTING if (app_.config().reporting()) - return static_cast( - &app_.getRelationalDBInterface()) + return static_cast(&app_.getRelationalDatabase()) ->isCaughtUp(reason); #endif @@ -743,7 +741,7 @@ LedgerMaster::tryFill(std::shared_ptr ledger) mCompleteLedgers.insert(range(minHas, maxHas)); } maxHas = minHas; - ledgerHashes = app_.getRelationalDBInterface().getHashesByIndex( + ledgerHashes = app_.getRelationalDatabase().getHashesByIndex( (seq < 500) ? 0 : (seq - 499), seq); it = ledgerHashes.find(seq); @@ -927,8 +925,8 @@ LedgerMaster::setFullLedger( { // Check the SQL database's entry for the sequence before this // ledger, if it's not this ledger's parent, invalidate it - uint256 prevHash = app_.getRelationalDBInterface().getHashByIndex( - ledger->info().seq - 1); + uint256 prevHash = + app_.getRelationalDatabase().getHashByIndex(ledger->info().seq - 1); if (prevHash.isNonZero() && prevHash != ledger->info().parentHash) clearLedger(ledger->info().seq - 1); } @@ -1664,7 +1662,7 @@ LedgerMaster::getValidatedLedger() #ifdef RIPPLED_REPORTING if (app_.config().reporting()) { - auto seq = app_.getRelationalDBInterface().getMaxLedgerSeq(); + auto seq = app_.getRelationalDatabase().getMaxLedgerSeq(); if (!seq) return {}; return getLedgerBySeq(*seq); @@ -1700,8 +1698,7 @@ LedgerMaster::getCompleteLedgers() { #ifdef RIPPLED_REPORTING if (app_.config().reporting()) - return static_cast( - &app_.getRelationalDBInterface()) + return static_cast(&app_.getRelationalDatabase()) ->getCompleteLedgers(); #endif std::lock_guard sl(mCompleteLock); @@ -1746,7 +1743,7 @@ LedgerMaster::getHashBySeq(std::uint32_t index) if (hash.isNonZero()) return hash; - return app_.getRelationalDBInterface().getHashByIndex(index); + return app_.getRelationalDatabase().getHashByIndex(index); } std::optional @@ -1967,7 +1964,7 @@ LedgerMaster::fetchForHistory( fillInProgress = mFillInProgress; } if (fillInProgress == 0 && - app_.getRelationalDBInterface().getHashByIndex(seq - 1) == + app_.getRelationalDatabase().getHashByIndex(seq - 1) == ledger->info().parentHash) { { @@ -2363,7 +2360,7 @@ LedgerMaster::getFetchPackCacheSize() const std::optional LedgerMaster::minSqlSeq() { - return app_.getRelationalDBInterface().getMinLedgerSeq(); + return app_.getRelationalDatabase().getMinLedgerSeq(); } } // namespace ripple diff --git a/src/ripple/app/main/Application.cpp b/src/ripple/app/main/Application.cpp index 2256ee31ba9..b7f18ba34db 100644 --- a/src/ripple/app/main/Application.cpp +++ b/src/ripple/app/main/Application.cpp @@ -45,8 +45,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -219,7 +219,7 @@ class ApplicationImp : public Application, public BasicApp boost::asio::steady_timer sweepTimer_; boost::asio::steady_timer 
entropyTimer_; - std::unique_ptr mRelationalDBInterface; + std::unique_ptr mRelationalDatabase; std::unique_ptr mWalletDB; std::unique_ptr overlay_; @@ -877,11 +877,11 @@ class ApplicationImp : public Application, public BasicApp return *txQ_; } - RelationalDBInterface& - getRelationalDBInterface() override + RelationalDatabase& + getRelationalDatabase() override { - assert(mRelationalDBInterface.get() != nullptr); - return *mRelationalDBInterface; + assert(mRelationalDatabase.get() != nullptr); + return *mRelationalDatabase; } DatabaseCon& @@ -907,14 +907,14 @@ class ApplicationImp : public Application, public BasicApp //-------------------------------------------------------------------------- bool - initRDBMS() + initRelationalDatabase() { assert(mWalletDB.get() == nullptr); try { - mRelationalDBInterface = - RelationalDBInterface::init(*this, *config_, *m_jobQueue); + mRelationalDatabase = + RelationalDatabase::init(*this, *config_, *m_jobQueue); // wallet database auto setup = setup_DatabaseCon(*config_, m_journal); @@ -1041,7 +1041,7 @@ class ApplicationImp : public Application, public BasicApp doSweep() { if (!config_->standalone() && - !getRelationalDBInterface().transactionDbHasSpace(*config_)) + !getRelationalDatabase().transactionDbHasSpace(*config_)) { signalStop(); } @@ -1066,8 +1066,7 @@ class ApplicationImp : public Application, public BasicApp cachedSLEs_.sweep(); #ifdef RIPPLED_REPORTING - if (auto pg = dynamic_cast( - &*mRelationalDBInterface)) + if (auto pg = dynamic_cast(&*mRelationalDatabase)) pg->sweep(); #endif @@ -1162,7 +1161,7 @@ ApplicationImp::setup() if (!config_->standalone()) timeKeeper_->run(config_->SNTP_SERVERS); - if (!initRDBMS() || !initNodeStore()) + if (!initRelationalDatabase() || !initNodeStore()) return false; if (shardStore_) @@ -1619,8 +1618,7 @@ ApplicationImp::run() ledgerCleaner_->stop(); if (reportingETL_) reportingETL_->stop(); - if (auto pg = dynamic_cast( - &*mRelationalDBInterface)) + if (auto pg = dynamic_cast(&*mRelationalDatabase)) pg->stop(); m_nodeStore->stop(); perfLog_->stop(); @@ -2137,7 +2135,7 @@ ApplicationImp::nodeToShards() void ApplicationImp::setMaxDisallowedLedger() { - auto seq = getRelationalDBInterface().getMaxLedgerSeq(); + auto seq = getRelationalDatabase().getMaxLedgerSeq(); if (seq) maxDisallowedLedger_ = *seq; diff --git a/src/ripple/app/main/Application.h b/src/ripple/app/main/Application.h index 0fc927ff65a..53155ca4f8d 100644 --- a/src/ripple/app/main/Application.h +++ b/src/ripple/app/main/Application.h @@ -99,7 +99,7 @@ class ValidatorList; class ValidatorSite; class Cluster; -class RelationalDBInterface; +class RelationalDatabase; class DatabaseCon; class SHAMapStore; @@ -251,8 +251,8 @@ class Application : public beast::PropertyStream::Source openLedger() = 0; virtual OpenLedger const& openLedger() const = 0; - virtual RelationalDBInterface& - getRelationalDBInterface() = 0; + virtual RelationalDatabase& + getRelationalDatabase() = 0; virtual std::chrono::milliseconds getIOLatency() = 0; diff --git a/src/ripple/app/main/DBInit.h b/src/ripple/app/main/DBInit.h index 00cfc104df0..3d2f42717b2 100644 --- a/src/ripple/app/main/DBInit.h +++ b/src/ripple/app/main/DBInit.h @@ -72,12 +72,22 @@ inline constexpr std::array LgrDBInit{ // Transaction database holds transactions and public keys inline constexpr auto TxDBName{"transaction.db"}; -inline constexpr std::array TxDBPragma +// In C++17 omitting the explicit template parameters caused +// a crash +inline constexpr std::array TxDBPragma { "PRAGMA page_size=4096;", 
"PRAGMA journal_size_limit=1582080;", "PRAGMA max_page_count=2147483646;", + #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) "PRAGMA mmap_size=17179869184;" +#else + + // Provide an explicit `no-op` SQL statement + // in order to keep the size of the array + // constant regardless of the preprocessor + // condition evaluation + "PRAGMA sqlite_noop_statement;" #endif }; @@ -117,12 +127,22 @@ inline constexpr std::array TxDBInit{ // The Ledger Meta database maps ledger hashes to shard indexes inline constexpr auto LgrMetaDBName{"ledger_meta.db"}; -inline constexpr std::array LgrMetaDBPragma +// In C++17 omitting the explicit template parameters caused +// a crash +inline constexpr std::array LgrMetaDBPragma { "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", "PRAGMA max_page_count=2147483646;", + #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) "PRAGMA mmap_size=17179869184;" +#else + + // Provide an explicit `no-op` SQL statement + // in order to keep the size of the array + // constant regardless of the preprocessor + // condition evaluation + "PRAGMA sqlite_noop_statement;" #endif }; @@ -141,12 +161,22 @@ inline constexpr std::array LgrMetaDBInit{ // Transaction Meta database maps transaction IDs to shard indexes inline constexpr auto TxMetaDBName{"transaction_meta.db"}; -inline constexpr std::array TxMetaDBPragma +// In C++17 omitting the explicit template parameters caused +// a crash +inline constexpr std::array TxMetaDBPragma { "PRAGMA page_size=4096;", "PRAGMA journal_size_limit=1582080;", "PRAGMA max_page_count=2147483646;", + #if (ULONG_MAX > UINT_MAX) && !defined(NO_SQLITE_MMAP) "PRAGMA mmap_size=17179869184;" +#else + + // Provide an explicit `no-op` SQL statement + // in order to keep the size of the array + // constant regardless of the preprocessor + // condition evaluation + "PRAGMA sqlite_noop_statement;" #endif }; diff --git a/src/ripple/app/main/Main.cpp b/src/ripple/app/main/Main.cpp index 14befa63ea8..c08836e79e7 100644 --- a/src/ripple/app/main/Main.cpp +++ b/src/ripple/app/main/Main.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/ripple/app/main/NodeIdentity.cpp b/src/ripple/app/main/NodeIdentity.cpp index 5f7cca7a594..a2051bbb674 100644 --- a/src/ripple/app/main/NodeIdentity.cpp +++ b/src/ripple/app/main/NodeIdentity.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/ripple/app/misc/NetworkOPs.cpp b/src/ripple/app/misc/NetworkOPs.cpp index 4b44cf431c7..9203a83ad41 100644 --- a/src/ripple/app/misc/NetworkOPs.cpp +++ b/src/ripple/app/misc/NetworkOPs.cpp @@ -37,9 +37,8 @@ #include #include #include -#include -#include -#include +#include +#include #include #include #include @@ -1749,7 +1748,7 @@ NetworkOPsImp::switchLastClosedLedger( auto const lastVal = app_.getLedgerMaster().getValidatedLedger(); std::optional rules; if (lastVal) - rules.emplace(*lastVal, app_.config().features); + rules = makeRulesGivenLedger(*lastVal, app_.config().features); else rules.emplace(app_.config().features); app_.openLedger().accept( @@ -3366,8 +3365,9 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) #ifdef RIPPLED_REPORTING if (app_.config().reporting()) { - if (dynamic_cast( - &app_.getRelationalDBInterface())) + // Use a dynamic_cast to return DatabaseType::None + // on failure. 
+ if (dynamic_cast(&app_.getRelationalDatabase())) { return DatabaseType::Postgres; } @@ -3375,16 +3375,18 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) } else { - if (dynamic_cast( - &app_.getRelationalDBInterface())) + // Use a dynamic_cast to return DatabaseType::None + // on failure. + if (dynamic_cast(&app_.getRelationalDatabase())) { return DatabaseType::Sqlite; } return DatabaseType::None; } #else - if (dynamic_cast( - &app_.getRelationalDBInterface())) + // Use a dynamic_cast to return DatabaseType::None + // on failure. + if (dynamic_cast(&app_.getRelationalDatabase())) { return DatabaseType::Sqlite; } @@ -3470,17 +3472,16 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) auto getMoreTxns = [&](std::uint32_t minLedger, std::uint32_t maxLedger, - std::optional - marker) + std::optional marker) -> std::optional>> { + RelationalDatabase::AccountTxs, + std::optional>> { switch (dbType) { case Postgres: { - auto db = static_cast( - &app_.getRelationalDBInterface()); - RelationalDBInterface::AccountTxArgs args; + auto db = static_cast( + &app_.getRelationalDatabase()); + RelationalDatabase::AccountTxArgs args; args.account = accountId; LedgerRange range{minLedger, maxLedger}; args.ledger = range; @@ -3496,7 +3497,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) } if (auto txns = - std::get_if( + std::get_if( &txResult.transactions); txns) { @@ -3512,9 +3513,9 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) } } case Sqlite: { - auto db = static_cast( - &app_.getRelationalDBInterface()); - RelationalDBInterface::AccountTxPageOptions options{ + auto db = static_cast( + &app_.getRelationalDatabase()); + RelationalDatabase::AccountTxPageOptions options{ accountId, minLedger, maxLedger, marker, 0, true}; return db->newestAccountTxPage(options); } @@ -3575,7 +3576,7 @@ NetworkOPsImp::addAccountHistoryJob(SubAccountHistoryInfoWeak subInfo) return; } - std::optional marker{}; + std::optional marker{}; while (!subInfo.index_->stopHistorical_) { auto dbResult = diff --git a/src/ripple/app/misc/SHAMapStore.h b/src/ripple/app/misc/SHAMapStore.h index 7a999012c37..c42e5f5a52a 100644 --- a/src/ripple/app/misc/SHAMapStore.h +++ b/src/ripple/app/misc/SHAMapStore.h @@ -55,7 +55,7 @@ class SHAMapStore clampFetchDepth(std::uint32_t fetch_depth) const = 0; virtual std::unique_ptr - makeNodeStore(std::int32_t readThreads) = 0; + makeNodeStore(int readThreads) = 0; /** Highest ledger that may be deleted. 
*/ virtual LedgerIndex diff --git a/src/ripple/app/misc/SHAMapStoreImp.cpp b/src/ripple/app/misc/SHAMapStoreImp.cpp index 56a817934c7..70519fc92ce 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.cpp +++ b/src/ripple/app/misc/SHAMapStoreImp.cpp @@ -20,8 +20,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -138,7 +138,7 @@ SHAMapStoreImp::SHAMapStoreImp( if (get_if_exists(section, "age_threshold_seconds", temp)) ageThreshold_ = std::chrono::seconds{temp}; if (get_if_exists(section, "recovery_wait_seconds", temp)) - recoveryWaitTime_.emplace(std::chrono::seconds{temp}); + recoveryWaitTime_ = std::chrono::seconds{temp}; get_if_exists(section, "advisory_delete", advisoryDelete_); @@ -166,7 +166,7 @@ SHAMapStoreImp::SHAMapStoreImp( } std::unique_ptr -SHAMapStoreImp::makeNodeStore(std::int32_t readThreads) +SHAMapStoreImp::makeNodeStore(int readThreads) { auto nscfg = app_.config().section(ConfigSection::nodeDatabase()); @@ -268,7 +268,7 @@ SHAMapStoreImp::copyNode(std::uint64_t& nodeCount, SHAMapTreeNode const& node) true); if (!(++nodeCount % checkHealthInterval_)) { - if (health()) + if (stopping()) return false; } @@ -326,7 +326,7 @@ SHAMapStoreImp::run() bool const readyToRotate = validatedSeq >= lastRotated + deleteInterval_ && - canDelete_ >= lastRotated - 1 && !health(); + canDelete_ >= lastRotated - 1 && !stopping(); // Make sure we don't delete ledgers currently being // imported into the ShardStore @@ -358,15 +358,8 @@ SHAMapStoreImp::run() << ledgerMaster_->getValidatedLedgerAge().count() << 's'; clearPrior(lastRotated); - switch (health()) - { - case Health::stopping: - return; - case Health::unhealthy: - continue; - case Health::ok: - default:; - } + if (stopping()) + return; JLOG(journal_.debug()) << "copying ledger " << validatedSeq; std::uint64_t nodeCount = 0; @@ -375,30 +368,16 @@ SHAMapStoreImp::run() this, std::ref(nodeCount), std::placeholders::_1)); - switch (health()) - { - case Health::stopping: - return; - case Health::unhealthy: - continue; - case Health::ok: - default:; - } + if (stopping()) + return; // Only log if we completed without a "health" abort JLOG(journal_.debug()) << "copied ledger " << validatedSeq << " nodecount " << nodeCount; JLOG(journal_.debug()) << "freshening caches"; freshenCaches(); - switch (health()) - { - case Health::stopping: - return; - case Health::unhealthy: - continue; - case Health::ok: - default:; - } + if (stopping()) + return; // Only log if we completed without a "health" abort JLOG(journal_.debug()) << validatedSeq << " freshened caches"; @@ -408,15 +387,8 @@ SHAMapStoreImp::run() << validatedSeq << " new backend " << newBackend->getName(); clearCaches(validatedSeq); - switch (health()) - { - case Health::stopping: - return; - case Health::unhealthy: - continue; - case Health::ok: - default:; - } + if (stopping()) + return; lastRotated = validatedSeq; @@ -580,7 +552,7 @@ SHAMapStoreImp::clearSql( min = *m; } - if (min > lastRotated || health() != Health::ok) + if (min > lastRotated || stopping()) return; if (min == lastRotated) { @@ -601,11 +573,11 @@ SHAMapStoreImp::clearSql( JLOG(journal_.trace()) << "End: Delete up to " << deleteBatch_ << " rows with LedgerSeq < " << min << " from: " << TableName; - if (health()) + if (stopping()) return; if (min < lastRotated) std::this_thread::sleep_for(backOff_); - if (health()) + if (stopping()) return; } JLOG(journal_.debug()) << "finished deleting from: " << TableName; @@ -645,23 +617,21 @@ SHAMapStoreImp::clearPrior(LedgerIndex 
lastRotated) ledgerMaster_->clearPriorLedgers(lastRotated); JLOG(journal_.trace()) << "End: Clear internal ledgers up to " << lastRotated; - if (health()) + if (stopping()) return; - RelationalDBInterfaceSqlite* iface = - dynamic_cast( - &app_.getRelationalDBInterface()); + SQLiteDatabase* const db = + dynamic_cast(&app_.getRelationalDatabase()); + + if (!db) + Throw("Failed to get relational database"); clearSql( lastRotated, "Ledgers", - [&iface]() -> std::optional { - return iface->getMinLedgerSeq(); - }, - [&iface](LedgerIndex min) -> void { - iface->deleteBeforeLedgerSeq(min); - }); - if (health()) + [db]() -> std::optional { return db->getMinLedgerSeq(); }, + [db](LedgerIndex min) -> void { db->deleteBeforeLedgerSeq(min); }); + if (stopping()) return; if (!app_.config().useTxTables()) @@ -670,70 +640,48 @@ SHAMapStoreImp::clearPrior(LedgerIndex lastRotated) clearSql( lastRotated, "Transactions", - [&iface]() -> std::optional { - return iface->getTransactionsMinLedgerSeq(); + [&db]() -> std::optional { + return db->getTransactionsMinLedgerSeq(); }, - [&iface](LedgerIndex min) -> void { - iface->deleteTransactionsBeforeLedgerSeq(min); + [&db](LedgerIndex min) -> void { + db->deleteTransactionsBeforeLedgerSeq(min); }); - if (health()) + if (stopping()) return; clearSql( lastRotated, "AccountTransactions", - [&iface]() -> std::optional { - return iface->getAccountTransactionsMinLedgerSeq(); + [&db]() -> std::optional { + return db->getAccountTransactionsMinLedgerSeq(); }, - [&iface](LedgerIndex min) -> void { - iface->deleteAccountTransactionsBeforeLedgerSeq(min); + [&db](LedgerIndex min) -> void { + db->deleteAccountTransactionsBeforeLedgerSeq(min); }); - if (health()) + if (stopping()) return; } -SHAMapStoreImp::Health -SHAMapStoreImp::health() +bool +SHAMapStoreImp::stopping() { + auto age = ledgerMaster_->getValidatedLedgerAge(); + OperatingMode mode = netOPs_->getOperatingMode(); + std::unique_lock lock(mutex_); + while (!stop_ && (mode != OperatingMode::FULL || age > ageThreshold_)) { - std::lock_guard lock(mutex_); - if (stop_) - return Health::stopping; + lock.unlock(); + JLOG(journal_.warn()) << "Waiting " << recoveryWaitTime_.count() + << "s for node to stabilize. state: " + << app_.getOPs().strOperatingMode(mode, false) + << ". age " << age.count() << 's'; + std::this_thread::sleep_for(recoveryWaitTime_); + age = ledgerMaster_->getValidatedLedgerAge(); + mode = netOPs_->getOperatingMode(); + lock.lock(); } - if (!netOPs_) - return Health::ok; - assert(deleteInterval_); - if (healthy_) - { - auto age = ledgerMaster_->getValidatedLedgerAge(); - OperatingMode mode = netOPs_->getOperatingMode(); - if (recoveryWaitTime_ && mode == OperatingMode::SYNCING && - age < ageThreshold_) - { - JLOG(journal_.warn()) - << "Waiting " << recoveryWaitTime_->count() - << "s for node to get back into sync with network. state: " - << app_.getOPs().strOperatingMode(mode, false) << ". age " - << age.count() << 's'; - std::this_thread::sleep_for(*recoveryWaitTime_); - - age = ledgerMaster_->getValidatedLedgerAge(); - mode = netOPs_->getOperatingMode(); - } - if (mode != OperatingMode::FULL || age > ageThreshold_) - { - JLOG(journal_.warn()) << "Not deleting. state: " - << app_.getOPs().strOperatingMode(mode, false) - << ". 
age " << age.count() << 's'; - healthy_ = false; - } - } - - if (healthy_) - return Health::ok; - else - return Health::unhealthy; + return stop_; } void diff --git a/src/ripple/app/misc/SHAMapStoreImp.h b/src/ripple/app/misc/SHAMapStoreImp.h index e3528faaada..a2d7b3006a8 100644 --- a/src/ripple/app/misc/SHAMapStoreImp.h +++ b/src/ripple/app/misc/SHAMapStoreImp.h @@ -22,8 +22,8 @@ #include #include -#include -#include +#include +#include #include #include @@ -40,8 +40,6 @@ class NetworkOPs; class SHAMapStoreImp : public SHAMapStore { private: - enum Health : std::uint8_t { ok = 0, stopping, unhealthy }; - class SavedStateDB { public: @@ -106,12 +104,12 @@ class SHAMapStoreImp : public SHAMapStore std::uint32_t deleteBatch_ = 100; std::chrono::milliseconds backOff_{100}; std::chrono::seconds ageThreshold_{60}; - /// If set, and the node is out of sync during an + /// If the node is out of sync during an /// online_delete health check, sleep the thread - /// for this time and check again so the node can - /// recover. + /// for this time, and continue checking until + /// recovery. /// See also: "recovery_wait_seconds" in rippled-example.cfg - std::optional recoveryWaitTime_; + std::chrono::seconds recoveryWaitTime_{5}; // these do not exist upon SHAMapStore creation, but do exist // as of run() or before @@ -136,7 +134,7 @@ class SHAMapStoreImp : public SHAMapStore } std::unique_ptr - makeNodeStore(std::int32_t readThreads) override; + makeNodeStore(int readThreads) override; LedgerIndex setCanDelete(LedgerIndex seq) override @@ -201,7 +199,7 @@ class SHAMapStoreImp : public SHAMapStore { dbRotating_->fetchNodeObject( key, 0, NodeStore::FetchType::synchronous, true); - if (!(++check % checkHealthInterval_) && health()) + if (!(++check % checkHealthInterval_) && stopping()) return true; } @@ -225,16 +223,15 @@ class SHAMapStoreImp : public SHAMapStore void clearPrior(LedgerIndex lastRotated); - // If rippled is not healthy, defer rotate-delete. - // If already unhealthy, do not change state on further check. - // Assume that, once unhealthy, a necessary step has been - // aborted, so the online-delete process needs to restart - // at next ledger. - // If recoveryWaitTime_ is set, this may sleep to give rippled - // time to recover, so never call it from any thread other than - // the main "run()". - Health - health(); + /** + * This is a health check for online deletion that waits until rippled is + * stable until returning. If the server is stopping, then it returns + * "true" to inform the caller to allow the server to stop. + * + * @return Whether the server is stopping. 
+ */ + bool + stopping(); public: void diff --git a/src/ripple/app/misc/impl/AccountTxPaging.cpp b/src/ripple/app/misc/impl/AccountTxPaging.cpp index 5c1e8017018..433463e2826 100644 --- a/src/ripple/app/misc/impl/AccountTxPaging.cpp +++ b/src/ripple/app/misc/impl/AccountTxPaging.cpp @@ -31,7 +31,7 @@ namespace ripple { void convertBlobsToTxResult( - RelationalDBInterface::AccountTxs& to, + RelationalDatabase::AccountTxs& to, std::uint32_t ledger_index, std::string const& status, Blob const& rawTxn, diff --git a/src/ripple/app/misc/impl/AccountTxPaging.h b/src/ripple/app/misc/impl/AccountTxPaging.h index ad3c40e56f7..6b8f235b5a8 100644 --- a/src/ripple/app/misc/impl/AccountTxPaging.h +++ b/src/ripple/app/misc/impl/AccountTxPaging.h @@ -20,7 +20,7 @@ #ifndef RIPPLE_APP_MISC_IMPL_ACCOUNTTXPAGING_H_INCLUDED #define RIPPLE_APP_MISC_IMPL_ACCOUNTTXPAGING_H_INCLUDED -#include +#include #include #include #include @@ -31,7 +31,7 @@ namespace ripple { void convertBlobsToTxResult( - RelationalDBInterface::AccountTxs& to, + RelationalDatabase::AccountTxs& to, std::uint32_t ledger_index, std::string const& status, Blob const& rawTxn, diff --git a/src/ripple/app/misc/impl/AmendmentTable.cpp b/src/ripple/app/misc/impl/AmendmentTable.cpp index be59320be4b..5f75d29540a 100644 --- a/src/ripple/app/misc/impl/AmendmentTable.cpp +++ b/src/ripple/app/misc/impl/AmendmentTable.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include diff --git a/src/ripple/app/misc/impl/Manifest.cpp b/src/ripple/app/misc/impl/Manifest.cpp index d5fcde19e3f..931b632159c 100644 --- a/src/ripple/app/misc/impl/Manifest.cpp +++ b/src/ripple/app/misc/impl/Manifest.cpp @@ -18,7 +18,7 @@ //============================================================================== #include -#include +#include #include #include #include diff --git a/src/ripple/app/misc/impl/Transaction.cpp b/src/ripple/app/misc/impl/Transaction.cpp index ee391c7a9ec..9adef982d01 100644 --- a/src/ripple/app/misc/impl/Transaction.cpp +++ b/src/ripple/app/misc/impl/Transaction.cpp @@ -21,9 +21,8 @@ #include #include #include -#include -#include -#include +#include +#include #include #include #include @@ -134,9 +133,15 @@ Transaction::load( Transaction::Locator Transaction::locate(uint256 const& id, Application& app) { - return dynamic_cast( - &app.getRelationalDBInterface()) - ->locateTransaction(id); + auto const db = + dynamic_cast(&app.getRelationalDatabase()); + + if (!db) + { + Throw("Failed to get relational database"); + } + + return db->locateTransaction(id); } std::variant< @@ -148,9 +153,14 @@ Transaction::load( std::optional> const& range, error_code_i& ec) { - return dynamic_cast( - &app.getRelationalDBInterface()) - ->getTransaction(id, range, ec); + auto const db = dynamic_cast(&app.getRelationalDatabase()); + + if (!db) + { + Throw("Failed to get relational database"); + } + + return db->getTransaction(id, range, ec); } // options 1 to include the date of the transaction diff --git a/src/ripple/app/paths/AccountCurrencies.cpp b/src/ripple/app/paths/AccountCurrencies.cpp index 2892ff869c9..18452725b67 100644 --- a/src/ripple/app/paths/AccountCurrencies.cpp +++ b/src/ripple/app/paths/AccountCurrencies.cpp @@ -33,18 +33,23 @@ accountSourceCurrencies( if (includeXRP) currencies.insert(xrpCurrency()); - for (auto const& rspEntry : lrCache->getRippleLines(account)) + if (auto const lines = + lrCache->getRippleLines(account, LineDirection::outgoing)) { - auto& saBalance = rspEntry.getBalance(); - - // Filter out non - if (saBalance > 
beast::zero - // Have IOUs to send. - || (rspEntry.getLimitPeer() - // Peer extends credit. - && ((-saBalance) < rspEntry.getLimitPeer()))) // Credit left. + for (auto const& rspEntry : *lines) { - currencies.insert(saBalance.getCurrency()); + auto& saBalance = rspEntry.getBalance(); + + // Filter out non + if (saBalance > beast::zero + // Have IOUs to send. + || + (rspEntry.getLimitPeer() + // Peer extends credit. + && ((-saBalance) < rspEntry.getLimitPeer()))) // Credit left. + { + currencies.insert(saBalance.getCurrency()); + } } } @@ -64,12 +69,16 @@ accountDestCurrencies( currencies.insert(xrpCurrency()); // Even if account doesn't exist - for (auto const& rspEntry : lrCache->getRippleLines(account)) + if (auto const lines = + lrCache->getRippleLines(account, LineDirection::outgoing)) { - auto& saBalance = rspEntry.getBalance(); + for (auto const& rspEntry : *lines) + { + auto& saBalance = rspEntry.getBalance(); - if (saBalance < rspEntry.getLimit()) // Can take more - currencies.insert(saBalance.getCurrency()); + if (saBalance < rspEntry.getLimit()) // Can take more + currencies.insert(saBalance.getCurrency()); + } } currencies.erase(badCurrency()); diff --git a/src/ripple/app/paths/PathRequest.cpp b/src/ripple/app/paths/PathRequest.cpp index e5b15fd9d01..d1acb3ac1fd 100644 --- a/src/ripple/app/paths/PathRequest.cpp +++ b/src/ripple/app/paths/PathRequest.cpp @@ -748,6 +748,8 @@ PathRequest::doUpdate( jvStatus = newStatus; } + JLOG(m_journal.debug()) + << iIdentifier << " update finished " << (fast ? "fast" : "normal"); return newStatus; } diff --git a/src/ripple/app/paths/Pathfinder.cpp b/src/ripple/app/paths/Pathfinder.cpp index 4e81bebd3c3..71a4afa7563 100644 --- a/src/ripple/app/paths/Pathfinder.cpp +++ b/src/ripple/app/paths/Pathfinder.cpp @@ -708,6 +708,7 @@ int Pathfinder::getPathsOut( Currency const& currency, AccountID const& account, + LineDirection direction, bool isDstCurrency, AccountID const& dstAccount, std::function const& continueCallback) @@ -735,33 +736,37 @@ Pathfinder::getPathsOut( { count = app_.getOrderBookDB().getBookSize(issue); - for (auto const& rspEntry : mRLCache->getRippleLines(account)) + if (auto const lines = mRLCache->getRippleLines(account, direction)) { - if (currency != rspEntry.getLimit().getCurrency()) + for (auto const& rspEntry : *lines) { - } - else if ( - rspEntry.getBalance() <= beast::zero && - (!rspEntry.getLimitPeer() || - -rspEntry.getBalance() >= rspEntry.getLimitPeer() || - (bAuthRequired && !rspEntry.getAuth()))) - { - } - else if (isDstCurrency && dstAccount == rspEntry.getAccountIDPeer()) - { - count += 10000; // count a path to the destination extra - } - else if (rspEntry.getNoRipplePeer()) - { - // This probably isn't a useful path out - } - else if (rspEntry.getFreezePeer()) - { - // Not a useful path out - } - else - { - ++count; + if (currency != rspEntry.getLimit().getCurrency()) + { + } + else if ( + rspEntry.getBalance() <= beast::zero && + (!rspEntry.getLimitPeer() || + -rspEntry.getBalance() >= rspEntry.getLimitPeer() || + (bAuthRequired && !rspEntry.getAuth()))) + { + } + else if ( + isDstCurrency && dstAccount == rspEntry.getAccountIDPeer()) + { + count += 10000; // count a path to the destination extra + } + else if (rspEntry.getNoRipplePeer()) + { + // This probably isn't a useful path out + } + else if (rspEntry.getFreezePeer()) + { + // Not a useful path out + } + else + { + ++count; + } } } } @@ -976,117 +981,128 @@ Pathfinder::addLink( bool const bIsNoRippleOut(isNoRippleOut(currentPath)); bool const 
bDestOnly(addFlags & afAC_LAST); - auto& rippleLines(mRLCache->getRippleLines(uEndAccount)); - - AccountCandidates candidates; - candidates.reserve(rippleLines.size()); - - for (auto const& rs : rippleLines) + if (auto const lines = mRLCache->getRippleLines( + uEndAccount, + bIsNoRippleOut ? LineDirection::incoming + : LineDirection::outgoing)) { - if (continueCallback && !continueCallback()) - return; - auto const& acct = rs.getAccountIDPeer(); + auto& rippleLines = *lines; - if (hasEffectiveDestination && (acct == mDstAccount)) - { - // We skipped the gateway - continue; - } - - bool bToDestination = acct == mEffectiveDst; + AccountCandidates candidates; + candidates.reserve(rippleLines.size()); - if (bDestOnly && !bToDestination) + for (auto const& rs : rippleLines) { - continue; - } + if (continueCallback && !continueCallback()) + return; + auto const& acct = rs.getAccountIDPeer(); + LineDirection const direction = rs.getDirectionPeer(); - if ((uEndCurrency == rs.getLimit().getCurrency()) && - !currentPath.hasSeen(acct, uEndCurrency, acct)) - { - // path is for correct currency and has not been seen - if (rs.getBalance() <= beast::zero && - (!rs.getLimitPeer() || - -rs.getBalance() >= rs.getLimitPeer() || - (bRequireAuth && !rs.getAuth()))) + if (hasEffectiveDestination && (acct == mDstAccount)) { - // path has no credit + // We skipped the gateway + continue; } - else if (bIsNoRippleOut && rs.getNoRipple()) + + bool bToDestination = acct == mEffectiveDst; + + if (bDestOnly && !bToDestination) { - // Can't leave on this path + continue; } - else if (bToDestination) + + if ((uEndCurrency == rs.getLimit().getCurrency()) && + !currentPath.hasSeen(acct, uEndCurrency, acct)) { - // destination is always worth trying - if (uEndCurrency == mDstAmount.getCurrency()) + // path is for correct currency and has not been + // seen + if (rs.getBalance() <= beast::zero && + (!rs.getLimitPeer() || + -rs.getBalance() >= rs.getLimitPeer() || + (bRequireAuth && !rs.getAuth()))) + { + // path has no credit + } + else if (bIsNoRippleOut && rs.getNoRipple()) + { + // Can't leave on this path + } + else if (bToDestination) { - // this is a complete path - if (!currentPath.empty()) + // destination is always worth trying + if (uEndCurrency == mDstAmount.getCurrency()) { - JLOG(j_.trace()) - << "complete path found ae: " - << currentPath.getJson( - JsonOptions::none); - addUniquePath(mCompletePaths, currentPath); + // this is a complete path + if (!currentPath.empty()) + { + JLOG(j_.trace()) + << "complete path found ae: " + << currentPath.getJson( + JsonOptions::none); + addUniquePath( + mCompletePaths, currentPath); + } + } + else if (!bDestOnly) + { + // this is a high-priority candidate + candidates.push_back( + {AccountCandidate::highPriority, acct}); } } - else if (!bDestOnly) + else if (acct == mSrcAccount) { - // this is a high-priority candidate - candidates.push_back( - {AccountCandidate::highPriority, acct}); + // going back to the source is bad + } + else + { + // save this candidate + int out = getPathsOut( + uEndCurrency, + acct, + direction, + bIsEndCurrency, + mEffectiveDst, + continueCallback); + if (out) + candidates.push_back({out, acct}); } - } - else if (acct == mSrcAccount) - { - // going back to the source is bad - } - else - { - // save this candidate - int out = getPathsOut( - uEndCurrency, - acct, - bIsEndCurrency, - mEffectiveDst, - continueCallback); - if (out) - candidates.push_back({out, acct}); } } - } - if (!candidates.empty()) - { - std::sort( - candidates.begin(), - 
candidates.end(), - std::bind( - compareAccountCandidate, - mLedger->seq(), - std::placeholders::_1, - std::placeholders::_2)); - - int count = candidates.size(); - // allow more paths from source - if ((count > 10) && (uEndAccount != mSrcAccount)) - count = 10; - else if (count > 50) - count = 50; - - auto it = candidates.begin(); - while (count-- != 0) + if (!candidates.empty()) { - if (continueCallback && !continueCallback()) - return; - // Add accounts to incompletePaths - STPathElement pathElement( - STPathElement::typeAccount, - it->account, - uEndCurrency, - it->account); - incompletePaths.assembleAdd(currentPath, pathElement); - ++it; + std::sort( + candidates.begin(), + candidates.end(), + std::bind( + compareAccountCandidate, + mLedger->seq(), + std::placeholders::_1, + std::placeholders::_2)); + + int count = candidates.size(); + // allow more paths from source + if ((count > 10) && (uEndAccount != mSrcAccount)) + count = 10; + else if (count > 50) + count = 50; + + auto it = candidates.begin(); + while (count-- != 0) + { + if (continueCallback && !continueCallback()) + return; + // Add accounts to incompletePaths + STPathElement pathElement( + STPathElement::typeAccount, + it->account, + uEndCurrency, + it->account); + incompletePaths.assembleAdd( + currentPath, pathElement); + ++it; + } } } } diff --git a/src/ripple/app/paths/Pathfinder.h b/src/ripple/app/paths/Pathfinder.h index 45da9ec1126..375e5e24677 100644 --- a/src/ripple/app/paths/Pathfinder.h +++ b/src/ripple/app/paths/Pathfinder.h @@ -144,6 +144,7 @@ class Pathfinder : public CountedObject getPathsOut( Currency const& currency, AccountID const& account, + LineDirection direction, bool isDestCurrency, AccountID const& dest, std::function const& continueCallback); diff --git a/src/ripple/app/paths/RippleLineCache.cpp b/src/ripple/app/paths/RippleLineCache.cpp index a0b26ba2841..2487924ff0e 100644 --- a/src/ripple/app/paths/RippleLineCache.cpp +++ b/src/ripple/app/paths/RippleLineCache.cpp @@ -26,39 +26,101 @@ namespace ripple { RippleLineCache::RippleLineCache( std::shared_ptr const& ledger, beast::Journal j) - : journal_(j) + : ledger_(ledger), journal_(j) { - mLedger = ledger; - - JLOG(journal_.debug()) << "RippleLineCache created for ledger " - << mLedger->info().seq; + JLOG(journal_.debug()) << "created for ledger " << ledger_->info().seq; } RippleLineCache::~RippleLineCache() { - JLOG(journal_.debug()) << "~RippleLineCache destroyed for ledger " - << mLedger->info().seq << " with " << lines_.size() - << " accounts"; + JLOG(journal_.debug()) << "destroyed for ledger " << ledger_->info().seq + << " with " << lines_.size() << " accounts and " + << totalLineCount_ << " distinct trust lines."; } -std::vector const& -RippleLineCache::getRippleLines(AccountID const& accountID) +std::shared_ptr> +RippleLineCache::getRippleLines( + AccountID const& accountID, + LineDirection direction) { - AccountKey key(accountID, hasher_(accountID)); + auto const hash = hasher_(accountID); + AccountKey key(accountID, direction, hash); + AccountKey otherkey( + accountID, + direction == LineDirection::outgoing ? LineDirection::incoming + : LineDirection::outgoing, + hash); std::lock_guard sl(mLock); - auto [it, inserted] = lines_.emplace(key, std::vector()); + auto [it, inserted] = [&]() { + if (auto otheriter = lines_.find(otherkey); otheriter != lines_.end()) + { + // The whole point of using the direction flag is to reduce the + // number of trust line objects held in memory. 
Ensure that there is + // only a single set of trustlines in the cache per account. + auto const size = otheriter->second ? otheriter->second->size() : 0; + JLOG(journal_.info()) + << "Request for " + << (direction == LineDirection::outgoing ? "outgoing" + : "incoming") + << " trust lines for account " << accountID << " found " << size + << (direction == LineDirection::outgoing ? " incoming" + : " outgoing") + << " trust lines. " + << (direction == LineDirection::outgoing + ? "Deleting the subset of incoming" + : "Returning the superset of outgoing") + << " trust lines. "; + if (direction == LineDirection::outgoing) + { + // This request is for the outgoing set, but there is already a + // subset of incoming lines in the cache. Erase that subset + // to be replaced by the full set. The full set will be built + // below, and will be returned, if needed, on subsequent calls + // for either value of outgoing. + assert(size <= totalLineCount_); + totalLineCount_ -= size; + lines_.erase(otheriter); + } + else + { + // This request is for the incoming set, but there is + // already a superset of the outgoing trust lines in the cache. + // The path finding engine will disregard the non-rippling trust + // lines, so to prevent them from being stored twice, return the + // outgoing set. + key = otherkey; + return std::pair{otheriter, false}; + } + } + return lines_.emplace(key, nullptr); + }(); if (inserted) - it->second = PathFindTrustLine::getItems(accountID, *mLedger); + { + assert(it->second == nullptr); + auto lines = + PathFindTrustLine::getItems(accountID, *ledger_, direction); + if (lines.size()) + { + it->second = std::make_shared>( + std::move(lines)); + totalLineCount_ += it->second->size(); + } + } - JLOG(journal_.debug()) << "RippleLineCache getRippleLines for ledger " - << mLedger->info().seq << " found " - << it->second.size() << " lines for " - << (inserted ? "new " : "existing ") << accountID - << " out of a total of " << lines_.size() - << " accounts"; + assert(!it->second || (it->second->size() > 0)); + auto const size = it->second ? it->second->size() : 0; + JLOG(journal_.trace()) << "getRippleLines for ledger " + << ledger_->info().seq << " found " << size + << (key.direction_ == LineDirection::outgoing + ? " outgoing" + : " incoming") + << " lines for " << (inserted ? "new " : "existing ") + << accountID << " out of a total of " + << lines_.size() << " accounts and " + << totalLineCount_ << " trust lines"; return it->second; } diff --git a/src/ripple/app/paths/RippleLineCache.h b/src/ripple/app/paths/RippleLineCache.h index e7a7e0f74a3..590c50082f7 100644 --- a/src/ripple/app/paths/RippleLineCache.h +++ b/src/ripple/app/paths/RippleLineCache.h @@ -44,27 +44,43 @@ class RippleLineCache final : public CountedObject std::shared_ptr const& getLedger() const { - return mLedger; + return ledger_; } - std::vector const& - getRippleLines(AccountID const& accountID); + /** Find the trust lines associated with an account. + + @param accountID The account + @param direction Whether the account is an "outgoing" link on the path. + "Outgoing" is defined as the source account, or an account found via a + trustline that has rippling enabled on the @accountID's side. If an + account is "outgoing", all trust lines will be returned. If an account is + not "outgoing", then any trust lines that don't have rippling enabled are + not usable, so only return trust lines that have rippling enabled on + @accountID's side. + @return Returns a vector of the usable trust lines. 
+ */ + std::shared_ptr> + getRippleLines(AccountID const& accountID, LineDirection direction); private: std::mutex mLock; ripple::hardened_hash<> hasher_; - std::shared_ptr mLedger; + std::shared_ptr ledger_; beast::Journal journal_; struct AccountKey final : public CountedObject { AccountID account_; + LineDirection direction_; std::size_t hash_value_; - AccountKey(AccountID const& account, std::size_t hash) - : account_(account), hash_value_(hash) + AccountKey( + AccountID const& account, + LineDirection direction, + std::size_t hash) + : account_(account), direction_(direction), hash_value_(hash) { } @@ -76,7 +92,8 @@ class RippleLineCache final : public CountedObject bool operator==(AccountKey const& lhs) const { - return hash_value_ == lhs.hash_value_ && account_ == lhs.account_; + return hash_value_ == lhs.hash_value_ && account_ == lhs.account_ && + direction_ == lhs.direction_; } std::size_t @@ -97,8 +114,17 @@ class RippleLineCache final : public CountedObject }; }; - hash_map, AccountKey::Hash> + // Use a shared_ptr so entries can be removed from the map safely. + // Even though a shared_ptr to a vector will take more memory just a vector, + // most accounts are not going to have any entries (estimated over 90%), so + // vectors will not need to be created for them. This should lead to far + // less memory usage overall. + hash_map< + AccountKey, + std::shared_ptr>, + AccountKey::Hash> lines_; + std::size_t totalLineCount_ = 0; }; } // namespace ripple diff --git a/src/ripple/app/paths/TrustLine.cpp b/src/ripple/app/paths/TrustLine.cpp index 12020acf714..14a5d6f8823 100644 --- a/src/ripple/app/paths/TrustLine.cpp +++ b/src/ripple/app/paths/TrustLine.cpp @@ -61,26 +61,38 @@ PathFindTrustLine::makeItem( namespace detail { template std::vector -getTrustLineItems(AccountID const& accountID, ReadView const& view) +getTrustLineItems( + AccountID const& accountID, + ReadView const& view, + LineDirection direction = LineDirection::outgoing) { std::vector items; forEachItem( view, accountID, - [&items, &accountID](std::shared_ptr const& sleCur) { + [&items, &accountID, &direction]( + std::shared_ptr const& sleCur) { auto ret = T::makeItem(accountID, sleCur); - if (ret) + if (ret && + (direction == LineDirection::outgoing || !ret->getNoRipple())) items.push_back(std::move(*ret)); }); + // This list may be around for a while, so free up any unneeded + // capacity + items.shrink_to_fit(); return items; } } // namespace detail std::vector -PathFindTrustLine::getItems(AccountID const& accountID, ReadView const& view) +PathFindTrustLine::getItems( + AccountID const& accountID, + ReadView const& view, + LineDirection direction) { - return detail::getTrustLineItems(accountID, view); + return detail::getTrustLineItems( + accountID, view, direction); } RPCTrustLine::RPCTrustLine( diff --git a/src/ripple/app/paths/TrustLine.h b/src/ripple/app/paths/TrustLine.h index 0217f0e750a..6b27dca3669 100644 --- a/src/ripple/app/paths/TrustLine.h +++ b/src/ripple/app/paths/TrustLine.h @@ -31,6 +31,15 @@ namespace ripple { +/** Describes how an account was found in a path, and how to find the next set +of paths. "Outgoing" is defined as the source account, or an account found via a +trustline that has rippling enabled on the account's side. +"Incoming" is defined as an account found via a trustline that has rippling +disabled on the account's side. Any trust lines for an incoming account that +have rippling disabled are unusable in paths. 
+*/ +enum class LineDirection : bool { incoming = false, outgoing = true }; + /** Wraps a trust line SLE for convenience. The complication of trust lines is that there is a "low" account and a "high" account. This wraps the @@ -109,6 +118,20 @@ class TrustLineBase return mFlags & (!mViewLowest ? lsfLowNoRipple : lsfHighNoRipple); } + LineDirection + getDirection() const + { + return getNoRipple() ? LineDirection::incoming + : LineDirection::outgoing; + } + + LineDirection + getDirectionPeer() const + { + return getNoRipplePeer() ? LineDirection::incoming + : LineDirection::outgoing; + } + /** Have we set the freeze flag on our peer */ bool getFreeze() const @@ -170,7 +193,10 @@ class PathFindTrustLine final : public TrustLineBase, makeItem(AccountID const& accountID, std::shared_ptr const& sle); static std::vector - getItems(AccountID const& accountID, ReadView const& view); + getItems( + AccountID const& accountID, + ReadView const& view, + LineDirection direction); }; // This wrapper is used for the `AccountLines` command and includes the quality diff --git a/src/ripple/app/rdb/Download.h b/src/ripple/app/rdb/Download.h new file mode 100644 index 00000000000..b72b5ec57e7 --- /dev/null +++ b/src/ripple/app/rdb/Download.h @@ -0,0 +1,79 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_DOWNLOAD_H_INCLUDED +#define RIPPLE_APP_RDB_DOWNLOAD_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +/** + * @brief openDatabaseBodyDb Opens a database that will store the contents of a + * file being downloaded, returns its descriptor, and starts a new + * download process or continues an existing one. + * @param setup Path to the database and other opening parameters. + * @param path Path of the new file to download. + * @return Pair containing a unique pointer to the database and the amount of + * bytes already downloaded if a download is being continued. + */ +std::pair, std::optional> +openDatabaseBodyDb( + DatabaseCon::Setup const& setup, + boost::filesystem::path const& path); + +/** + * @brief databaseBodyDoPut Saves a new fragment of a downloaded file. + * @param session Session with the database. + * @param data Downloaded fragment of file data to save. + * @param path Path to the file currently being downloaded. + * @param fileSize Size of the portion of the file already downloaded. + * @param part The index of the most recently updated database row. + * @param maxRowSizePad A constant padding value that accounts for other data + * stored in each row of the database. 
+ * @return Index of the most recently updated database row. + */ +std::uint64_t +databaseBodyDoPut( + soci::session& session, + std::string const& data, + std::string const& path, + std::uint64_t fileSize, + std::uint64_t part, + std::uint16_t maxRowSizePad); + +/** + * @brief databaseBodyFinish Finishes the download process and writes the file + * to disk. + * @param session Session with the database. + * @param fout Opened file into which the downloaded data from the database will + * be written. + */ +void +databaseBodyFinish(soci::session& session, std::ofstream& fout); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/PeerFinder.h b/src/ripple/app/rdb/PeerFinder.h new file mode 100644 index 00000000000..06cd80f670b --- /dev/null +++ b/src/ripple/app/rdb/PeerFinder.h @@ -0,0 +1,76 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_PEERFINDER_H_INCLUDED +#define RIPPLE_APP_RDB_PEERFINDER_H_INCLUDED + +#include +#include +#include + +namespace ripple { + +/** + * @brief initPeerFinderDB Opens a session with the peer finder database. + * @param session Session with the peer finder database. + * @param config Path to the database and other opening parameters. + * @param j Journal. + */ +void +initPeerFinderDB( + soci::session& session, + BasicConfig const& config, + beast::Journal j); + +/** + * @brief updatePeerFinderDB Updates the peer finder database to a new version. + * @param session Session with the database. + * @param currentSchemaVersion New version of the database. + * @param j Journal. + */ +void +updatePeerFinderDB( + soci::session& session, + int currentSchemaVersion, + beast::Journal j); + +/** + * @brief readPeerFinderDB Reads all entries from the peer finder database and + * invokes the given callback for each entry. + * @param session Session with the database. + * @param func Callback to invoke for each entry. + */ +void +readPeerFinderDB( + soci::session& session, + std::function const& func); + +/** + * @brief savePeerFinderDB Saves a new entry to the peer finder database. + * @param session Session with the database. + * @param v Entry to save which contains information about a new peer. 
+ */ +void +savePeerFinderDB( + soci::session& session, + std::vector const& v); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/README.md b/src/ripple/app/rdb/README.md new file mode 100644 index 00000000000..1a68a1ae5e3 --- /dev/null +++ b/src/ripple/app/rdb/README.md @@ -0,0 +1,102 @@ +# Relational Database Interface + +The guiding principles of the Relational Database Interface are summarized below: + +* All hard-coded SQL statements should be stored in the [files](#source-files) under the `ripple/app/rdb` directory. With the exception of test modules, no hard-coded SQL should be added to any other file in rippled. +* The base class `RelationalDatabase` is inherited by derived classes that each provide an interface for operating on distinct relational database systems. +* For future use, the shard store will be used if the node store is absent. + +## Overview + +Firstly, the interface `RelationalDatabase` is inherited by the classes `SQLiteDatabase` and `PostgresDatabase` which are used to operate the software's main data store (for storing transactions, accounts, ledgers, etc.). Secondly, the files under the `detail` directory provide supplementary functions that are used by these derived classes to access the underlying databases. Lastly, the remaining files in the interface (located at the top level of the module) are used by varied parts of the software to access any secondary relational databases. + +## Configuration + +The config section `[relational_db]` has a property named `backend` whose value designates which database implementation will be used for node or shard databases. Presently the only valid value for this property is `sqlite`: + +``` +[relational_db] +backend=sqlite +``` + +## Source Files + +The Relational Database Interface consists of the following directory structure (as of November 2021): + +``` +src/ripple/app/rdb/ +├── backend +│   ├── detail +│   │   ├── impl +│   │   │   ├── Node.cpp +│   │   │   └── Shard.cpp +│   │   ├── Node.h +│   │   └── Shard.h +│   ├── impl +│   │   ├── PostgresDatabase.cpp +│   │   └── SQLiteDatabase.cpp +│   ├── PostgresDatabase.h +│   └── SQLiteDatabase.h +├── impl +│   ├── Download.cpp +│   ├── PeerFinder.cpp +│   ├── RelationalDatabase.cpp +│   ├── ShardArchive.cpp +│   ├── State.cpp +│   ├── UnitaryShard.cpp +│   ├── Vacuum.cpp +│   └── Wallet.cpp +├── Download.h +├── PeerFinder.h +├── RelationalDatabase.h +├── README.md +├── ShardArchive.h +├── State.h +├── UnitaryShard.h +├── Vacuum.h +└── Wallet.h +``` + +### File Contents +| File | Contents | +| ----------- | ----------- | +| `Node.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite node databases| +| `Shard.[h\|cpp]` | Defines/Implements methods used by `SQLiteDatabase` for interacting with SQLite shard databases | +| `PostgresDatabase.[h\|cpp]` | Defines/Implements the class `PostgresDatabase`/`PostgresDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | +|`SQLiteDatabase.[h\|cpp]`| Defines/Implements the class `SQLiteDatabase`/`SQLiteDatabaseImp` which inherits from `RelationalDatabase` and is used to operate on the main stores | +| `Download.[h\|cpp]` | Defines/Implements methods for persisting file downloads to a SQLite database | +| `PeerFinder.[h\|cpp]` | Defines/Implements methods for interacting with the PeerFinder SQLite database | +|`RelationalDatabase.cpp`| Implements the static method `RelationalDatabase::init` which is used to initialize an instance of 
`RelationalDatabase` | +| `RelationalDatabase.h` | Defines the abstract class `RelationalDatabase`, the primary class of the Relational Database Interface | +| `ShardArchive.[h\|cpp]` | Defines/Implements methods used by `ShardArchiveHandler` for interacting with SQLite databases containing metadata regarding shard downloads | +| `State.[h\|cpp]` | Defines/Implements methods for interacting with the State SQLite database which concerns ledger deletion and database rotation | +| `UnitaryShard.[h\|cpp]` | Defines/Implements methods used by a unitary instance of `Shard` for interacting with the various SQLite databases thereof. These files are distinct from `Shard.[h\|cpp]` which contain methods used by `SQLiteDatabaseImp` | +| `Vacuum.[h\|cpp]` | Defines/Implements a method for performing the `VACUUM` operation on SQLite databases | +| `Wallet.[h\|cpp]` | Defines/Implements methods for interacting with Wallet SQLite databases | + +## Classes + +The abstract class `RelationalDatabase` is the primary class of the Relational Database Interface and is defined in the eponymous header file. This class provides a static method `init()` which, when invoked, creates a concrete instance of a derived class whose type is specified by the system configuration. All other methods in the class are virtual. Presently there exist two classes that derive from `RelationalDatabase`, namely `SQLiteDatabase` and `PostgresDatabase`. + +## Database Methods + +The Relational Database Interface provides three categories of methods for interacting with databases: + +* Free functions for interacting with SQLite databases used by various components of the software. These methods feature a `soci::session` parameter which facilitates connecting to SQLite databases, and are defined and implemented in the following files: + + * `Download.[h\|cpp]` + * `PeerFinder.[h\|cpp]` + * `ShardArchive.[h\|cpp]` + * `State.[h\|cpp]` + * `UnitaryShard.[h\|cpp]` + * `Vacuum.[h\|cpp]` + * `Wallet.[h\|cpp]` + + +* Free functions used exclusively by `SQLiteDatabaseImp` for interacting with SQLite databases owned by the node store or shard store. Unlike the free functions in the files listed above, these are not intended to be invoked directly by clients. Rather, these methods are invoked by derived instances of `RelationalDatabase`. These methods are defined in the following files: + + * `Node.[h|cpp]` + * `Shard.[h|cpp]` + + +* Member functions of `RelationalDatabase`, `SQLiteDatabase`, and `PostgresDatabase` which are used to access the main stores (node store, shard store). The `SQLiteDatabase` class will access the node store by default, but will use shard databases if the node store is not present and the shard store is available. The class `PostgresDatabase` uses only the node store. diff --git a/src/ripple/app/rdb/RelationalDBInterface.md b/src/ripple/app/rdb/RelationalDBInterface.md deleted file mode 100644 index 302c9befeba..00000000000 --- a/src/ripple/app/rdb/RelationalDBInterface.md +++ /dev/null @@ -1,288 +0,0 @@ -# Relational Database Interface - -Here are main principles of Relational DB interface: - -1) All SQL hard code is in the files described below in Files section. -No hard-coded SQL should be added to any other file in rippled, except related -to tests for specific SQL implementations. -2) Pure interface class `RelationalDBInterface` can have several -implementations for different relational database types. -3) For future use, if the node database is absent, then shard databases will -be used. 
- -## Configuration - -Section `[relational_db]` of the configuration file contains parameter -`backend`. The value of this parameter is the name of relational database -implementation used for node or shard databases. At the present, the only valid -value of this parameter is `sqlite`. - -## Files - -The following source files are related to Relational DB interface: - -- `ripple/app/rdb/RelationalDBInterface.h` - definition of main pure class of -the interface, `RelationalDBInterface`; -- `ripple/app/rdb/impl/RelationalDBInterface.cpp` - implementation of static -method `init()` of the class `RelationalDBInterface`; -- `ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h` - definition of pure -class `RelationalDBInterfaceSqlite` derived from `RelationalDBInterface`; -this is base class for sqlite implementation of the interface; -- `ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp` - implementation of -`RelationalDBInterfaceSqlite`-derived class for the case of sqlite databases; -- `ripple/app/rdb/backend/RelationalDBInterfacePostgres.h` - definition of pure -class `RelationalDBInterfacePostgres` derived from `RelationalDBInterface`; -this is base class for postgres implementation of the interface; -- `ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp` - implementation -of `RelationalDBInterfacePostgres`-derived class for the case of postgres -databases; -- `ripple/app/rdb/RelationalDBInterface_global.h` - definitions of global -methods for all sqlite databases except of node and shard; -- `ripple/app/rdb/impl/RelationalDBInterface_global.cpp` - implementations of -global methods for all sqlite databases except of node and shard; -- `ripple/app/rdb/RelationalDBInterface_nodes.h` - definitions of global -methods for sqlite node databases; -- `ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp` - implementations of -global methods for sqlite node databases; -- `ripple/app/rdb/RelationalDBInterface_shards.h` - definitions of global -methods for sqlite shard databases; -- `ripple/app/rdb/impl/RelationalDBInterface_shards.cpp` - implementations of -global methods for sqlite shard databases; -- `ripple/app/rdb/RelationalDBInterface_postgres.h` - definitions of internal -methods for postgres databases; -- `ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp` - implementations of -internal methods for postgres databases; - -## Classes - -The main class of the interface is `class RelationalDBInterface`. It is defined -in the file `RelationalDBInterface.h`. This class has static method `init()` -which allow to create proper `RelationalDBInterface`-derived class specified -in the config. All other methods are pure virtual. These methods do not use -database as a parameter. It assumed that implementation of class derived from -`RelationalDBInterface` holds all database pointers inside and uses appropriate -databases (nodes or shards) to get return values required by each method. - -At the present, there are two implementations of the derived classes - -`class RelationalDBInterfaceSqlite` for sqlite database (it is located in the -file `RelationalDBInterfaceSqlite.cpp`) and -`class RelationalDBInterfacePostgres` for postgres database (it is located in -the file `RelationalDBInterfacePostgres.cpp`) - -## Methods - -There are 3 types of methods for SQL interface: - -1) Global methods for work with all databases except of node. In particular, -methods related to shards datavases only. These methods are sqlite-specific. -They use `soci::session` as database pointer parameter. 
Defined and -implemented in files `RelationalDBInterface_global.*` and -`RelationalDBInterface_shard.*`. - -2) Global methods for work with node databases, and also with shard databases. -For sqlite case, these methods are internal for `RelationalDBInterfaceSqlite` -implementation of the class `RelationalDBInterface`. They use `soci::session` -as database pointer parameter. Defined and implemented in files -`RelationalDBInterface_nodes.*`. For postgres case, these methods are internal -for `RelationalDBInterfacePostgres` implementation of the class -`RelationalDBInterface`. They use `std::shared_ptr` as database pointer -parameter. Defined and implemented in files `RelationalDBInterface_postgres.*`. - -3) Virtual methods of class `RelationalDBInterface` and also derived classes -`RelationalDBInterfaceSqlite` and `RelationalDBInterfacePostgres`. -Calling such a method resulted in calling corresponding method from -`RelationalDBInterface`-derived class. For sqlite case, such a method tries to -retrieve information from node database, and if this database not exists - then -from shard databases. For both node and shard databases, calls to global -methods of type 2) performed. For postgres case, such a method retrieves -information only from node database by calling a global method of type 2). - -## Methods lists - -### Type 1 methods - -#### Files RelationalDBInterface_global.* - -Wallet DB methods: -``` -makeWalletDB -makeTestWalletDB -getManifests -saveManifests -addValidatorManifest -getNodeIdentity -getPeerReservationTable -insertPeerReservation -deletePeerReservation -createFeatureVotes -readAmendments -voteAmendment -``` - -State DB methods: -``` -initStateDB -getCanDelete -setCanDelete -getSavedState -setSavedState -setLastRotated -``` - -DatabaseBody DB methods: -``` -openDatabaseBodyDb -databaseBodyDoPut -databaseBodyFinish -``` - -Vacuum DB method: -``` -doVacuumDB -``` - -PeerFinder DB methods: -``` -initPeerFinderDB -updatePeerFinderDB -readPeerFinderDB -savePeerFinderDB -``` - -#### Files RelationalDBInterface_shards.* - -Shards DB methods: -``` -makeShardCompleteLedgerDBs -makeShardIncompleteLedgerDBs -updateLedgerDBs -``` - -Shard acquire DB methods: -``` -makeAcquireDB -insertAcquireDBIndex -selectAcquireDBLedgerSeqs -selectAcquireDBLedgerSeqsHash -updateAcquireDB -``` - -Shard archive DB methods: -``` -makeArchiveDB -readArchiveDB -insertArchiveDB -deleteFromArchiveDB -dropArchiveDB -``` - -### Type 2 methods - -#### Files RelationalDBInterface_nodes.* - -``` -makeLedgerDBs -getMinLedgerSeq -getMaxLedgerSeq -deleteByLedgerSeq -deleteBeforeLedgerSeq -getRows -getRowsMinMax -saveValidatedLedger -getLedgerInfoByIndex -getOldestLedgerInfo -getNewestLedgerInfo -getLimitedOldestLedgerInfo -getLimitedNewestLedgerInfo -getLedgerInfoByHash -getHashByIndex -getHashesByIndex -getHashesByIndex -getTxHistory -getOldestAccountTxs -getNewestAccountTxs -getOldestAccountTxsB -getNewestAccountTxsB -oldestAccountTxPage -newestAccountTxPage -getTransaction -DbHasSpace -``` - -#### Files RelationalDBInterface_postgres.* - -``` -getMinLedgerSeq -getMaxLedgerSeq -getCompleteLedgers -getValidatedLedgerAge -getNewestLedgerInfo -getLedgerInfoByIndex -getLedgerInfoByHash -getHashByIndex -getHashesByIndex -getTxHashes -getAccountTx -locateTransaction -writeLedgerAndTransactions -getTxHistory -``` - -### Type 3 methods - -#### Files RelationalDBInterface.* - -``` -init -getMinLedgerSeq -getMaxLedgerSeq -getLedgerInfoByIndex -getNewestLedgerInfo -getLedgerInfoByHash -getHashByIndex -getHashesByIndex 
-getTxHistory -ledgerDbHasSpace -transactionDbHasSpace -``` - -#### Files backend/RelationalDBInterfaceSqlite.* - -``` -getTransactionsMinLedgerSeq -getAccountTransactionsMinLedgerSeq -deleteTransactionByLedgerSeq -deleteBeforeLedgerSeq -deleteTransactionsBeforeLedgerSeq -deleteAccountTransactionsBeforeLedgerSeq -getTransactionCount -getAccountTransactionCount -getLedgerCountMinMax -saveValidatedLedger -getLimitedOldestLedgerInfo -getLimitedNewestLedgerInfo -getOldestAccountTxs -getNewestAccountTxs -getOldestAccountTxsB -getNewestAccountTxsB -oldestAccountTxPage -newestAccountTxPage -oldestAccountTxPageB -newestAccountTxPageB -getTransaction -getKBUsedAll -getKBUsedLedger -getKBUsedTransaction -``` - -#### Files backend/RelationalDBInterfacePostgres.* - -``` -sweep -getCompleteLedgers -getValidatedLedgerAge -writeLedgerAndTransactions -getTxHashes -getAccountTx -locateTransaction -``` diff --git a/src/ripple/app/rdb/RelationalDBInterface_global.h b/src/ripple/app/rdb/RelationalDBInterface_global.h deleted file mode 100644 index 3eb0469ee8e..00000000000 --- a/src/ripple/app/rdb/RelationalDBInterface_global.h +++ /dev/null @@ -1,333 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_GLOBAL_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACE_GLOBAL_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -/* Wallet DB */ - -/** - * @brief makeWalletDB Opens wallet DB and returns it. - * @param setup Path to database and other opening parameters. - * @return Unique pointer to database descriptor. - */ -std::unique_ptr -makeWalletDB(DatabaseCon::Setup const& setup); - -/** - * @brief makeTestWalletDB Opens test wallet DB with arbitrary name. - * @param setup Path to database and other opening parameters. - * @param dbname Name of database. - * @return Unique pointer to database descriptor. - */ -std::unique_ptr -makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname); - -/** - * @brief getManifests Loads manifest from wallet DB and stores it in the cache. - * @param session Session with database. - * @param dbTable Name of table in the database to extract manifest from. - * @param mCache Cache to store manifest. - * @param j Journal. - */ -void -getManifests( - soci::session& session, - std::string const& dbTable, - ManifestCache& mCache, - beast::Journal j); - -/** - * @brief saveManifests Saves all given manifests to database. - * @param session Session with database. - * @param dbTable Name of database table to save manifest into. 
- * @param isTrusted Callback returned true if key is trusted. - * @param map Map to save which points public keys to manifests. - * @param j Journal. - */ -void -saveManifests( - soci::session& session, - std::string const& dbTable, - std::function const& isTrusted, - hash_map const& map, - beast::Journal j); - -/** - * @brief addValidatorManifest Saves manifest of validator to database. - * @param session Session with database. - * @param serialized Manifest of validator in raw format. - */ -void -addValidatorManifest(soci::session& session, std::string const& serialized); - -/** - * @brief getNodeIdentity Returns public and private keys of this node. - * @param session Session with database. - * @return Pair of public and private keys. - */ -std::pair -getNodeIdentity(soci::session& session); - -/** - * @brief getPeerReservationTable Returns peer reservation table. - * @param session Session with database. - * @param j Journal. - * @return Peer reservation hash table. - */ -std::unordered_set, KeyEqual> -getPeerReservationTable(soci::session& session, beast::Journal j); - -/** - * @brief insertPeerReservation Adds entry to peer reservation table. - * @param session Session with database. - * @param nodeId public key of node. - * @param description Description of node. - */ -void -insertPeerReservation( - soci::session& session, - PublicKey const& nodeId, - std::string const& description); - -/** - * @brief deletePeerReservation Deletes entry from peer reservation table. - * @param session Session with database. - * @param nodeId Public key of node to remove. - */ -void -deletePeerReservation(soci::session& session, PublicKey const& nodeId); - -/** - * @brief createFeatureVotes Creates FeatureVote table if it is not exists. - * @param session Session with walletDB database. - * @return true if the table already exists - */ -bool -createFeatureVotes(soci::session& session); - -// For historical reasons the up-vote and down-vote integer representations -// are unintuitive. -enum class AmendmentVote : int { up = 0, down = 1 }; - -/** - * @brief readAmendments Read all amendments from FeatureVotes table. - * @param session Session with walletDB database. - * @param callback Callback called for each amendment passing its hash, name - * and the flag if it should be vetoed as callback parameters - */ -void -readAmendments( - soci::session& session, - std::function amendment_hash, - boost::optional amendment_name, - boost::optional vote)> const& callback); - -/** - * @brief voteAmendment Set veto value for particular amendment. - * @param session Session with walletDB database. - * @param amendment Hash of amendment. - * @param name Name of amendment. - * @param vote Whether to vote in favor of this amendment. - */ -void -voteAmendment( - soci::session& session, - uint256 const& amendment, - std::string const& name, - AmendmentVote vote); - -/* State DB */ - -struct SavedState -{ - std::string writableDb; - std::string archiveDb; - LedgerIndex lastRotated; -}; - -/** - * @brief initStateDB Opens DB session with State DB. - * @param session Structure to open session in. - * @param config Path to database and other opening parameters. - * @param dbName Name of database. - */ -void -initStateDB( - soci::session& session, - BasicConfig const& config, - std::string const& dbName); - -/** - * @brief getCanDelete Returns ledger sequence which can be deleted. - * @param session Session with database. - * @return Ledger sequence. 
- */ -LedgerIndex -getCanDelete(soci::session& session); - -/** - * @brief setCanDelete Updates ledger sequence which can be deleted. - * @param session Session with database. - * @param canDelete Ledger sequence to save. - * @return Previous value of ledger sequence whic can be deleted. - */ -LedgerIndex -setCanDelete(soci::session& session, LedgerIndex canDelete); - -/** - * @brief getSavedState Returns saved state. - * @param session Session with database. - * @return The SavedState structure which contains names of - * writable DB, archive DB and last rotated ledger sequence. - */ -SavedState -getSavedState(soci::session& session); - -/** - * @brief setSavedState Saves given state. - * @param session Session with database. - * @param state The SavedState structure which contains names of - * writable DB, archive DB and last rotated ledger sequence. - */ -void -setSavedState(soci::session& session, SavedState const& state); - -/** - * @brief setLastRotated Updates last rotated ledger sequence. - * @param session Session with database. - * @param seq New value of last rotated ledger sequence. - */ -void -setLastRotated(soci::session& session, LedgerIndex seq); - -/* DatabaseBody DB */ - -/** - * @brief openDatabaseBodyDb Opens file download DB and returns its descriptor. - * Start new download process or continue existing one. - * @param setup Path to database and other opening parameters. - * @param path Path of new file to download. - * @return Pair of unique pointer to database and current downloaded size - * if download process continues. - */ -std::pair, std::optional> -openDatabaseBodyDb( - DatabaseCon::Setup const& setup, - boost::filesystem::path const& path); - -/** - * @brief databaseBodyDoPut Saves new fragment of downloaded file. - * @param session Session with database. - * @param data Downloaded piece to file data tp save. - * @param path Path of downloading file. - * @param fileSize Size of downloaded piece of file. - * @param part Sequence number of downloaded file part. - * @param maxRowSizePad Maximum size of file part to save. - * @return Number of saved parts. Downloaded piece may be splitted - * into several parts of size not large that maxRowSizePad. - */ -std::uint64_t -databaseBodyDoPut( - soci::session& session, - std::string const& data, - std::string const& path, - std::uint64_t fileSize, - std::uint64_t part, - std::uint16_t maxRowSizePad); - -/** - * @brief databaseBodyFinish Finishes download process and writes file to disk. - * @param session Session with database. - * @param fout Opened file to write downloaded data from database. - */ -void -databaseBodyFinish(soci::session& session, std::ofstream& fout); - -/* Vacuum DB */ - -/** - * @brief doVacuumDB Creates, initialises DB, and performs its cleanup. - * @param setup Path to database and other opening parameters. - * @return True if vacuum process completed successfully. - */ -bool -doVacuumDB(DatabaseCon::Setup const& setup); - -/* PeerFinder DB */ - -/** - * @brief initPeerFinderDB Opens session with peer finder database. - * @param session Structure to open session in. - * @param config Path to database and other opening parameters. - * @param j Journal. - */ -void -initPeerFinderDB( - soci::session& session, - BasicConfig const& config, - beast::Journal j); - -/** - * @brief updatePeerFinderDB Update peer finder DB to new version. - * @param session Session with database. - * @param currentSchemaVersion New version of database. - * @param j Journal. 
- */ -void -updatePeerFinderDB( - soci::session& session, - int currentSchemaVersion, - beast::Journal j); - -/** - * @brief readPeerFinderDB Read all entries from peer finder DB and call - * given callback for each entry. - * @param session Session with database. - * @param func Callback to call for each entry. - */ -void -readPeerFinderDB( - soci::session& session, - std::function const& func); - -/** - * @brief savePeerFinderDB Save new entry to peer finder DB. - * @param session Session with database. - * @param v Entry to save which contains information about new peer. - */ -void -savePeerFinderDB( - soci::session& session, - std::vector const& v); - -} // namespace ripple - -#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface_postgres.h b/src/ripple/app/rdb/RelationalDBInterface_postgres.h deleted file mode 100644 index f5838813b5e..00000000000 --- a/src/ripple/app/rdb/RelationalDBInterface_postgres.h +++ /dev/null @@ -1,248 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_POSTGRES_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACE_POSTGRES_H_INCLUDED - -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -class PgPool; - -using AccountTxMarker = RelationalDBInterface::AccountTxMarker; -using AccountTxArgs = RelationalDBInterface::AccountTxArgs; -using AccountTxResult = RelationalDBInterface::AccountTxResult; -using AccountTransactionsData = RelationalDBInterface::AccountTransactionsData; - -/** - * @brief getMinLedgerSeq Returns minimum ledger sequence - * from Postgres database - * @param pgPool Link to postgres database - * @param app Application - * @param j Journal - * @return Minimum ledger sequence if any, none if no ledgers - */ -std::optional -getMinLedgerSeq(std::shared_ptr const& pgPool, beast::Journal j); - -/** - * @brief getMaxLedgerSeq Returns maximum ledger sequence - * from Postgres database - * @param pgPool Link to postgres database - * @param app Application - * @return Maximum ledger sequence if any, none if no ledgers - */ -std::optional -getMaxLedgerSeq(std::shared_ptr const& pgPool); - -/** - * @brief getCompleteLedgers Returns string which contains - * list of completed ledgers - * @param pgPool Link to postgres database - * @param app Application - * @return String with completed ledgers - */ -std::string -getCompleteLedgers(std::shared_ptr const& pgPool); - -/** - * @brief getValidatedLedgerAge Returns age of last - * validated ledger - * @param pgPool Link to postgres database - * @param app Application - * @param j Journal - * @return Age of last validated ledger - */ -std::chrono::seconds -getValidatedLedgerAge(std::shared_ptr const& pgPool, beast::Journal j); - -/** - * @brief getNewestLedgerInfo Load latest ledger info from Postgres - * @param pgPool Link to postgres database - * @param app reference to Application - * @return Ledger info - */ -std::optional -getNewestLedgerInfo(std::shared_ptr const& pgPool, Application& app); - -/** - * @brief getLedgerInfoByIndex Load ledger info by index (AKA sequence) - * from Postgres - * @param pgPool Link to postgres database - * @param ledgerIndex the ledger index (or sequence) to load - * @param app reference to Application - * @return Ledger info - */ -std::optional -getLedgerInfoByIndex( - std::shared_ptr const& pgPool, - std::uint32_t ledgerIndex, - Application& app); - -/** - * @brief getLedgerInfoByHash Load ledger info by hash from Postgres - * @param pgPool Link to postgres database - * @param hash Hash of the ledger to load - * @param app reference to Application - * @return Ledger info - */ -std::optional -getLedgerInfoByHash( - std::shared_ptr const& pgPool, - uint256 const& ledgerHash, - Application& app); - -/** - * @brief getHashByIndex Given a ledger sequence, - * return the ledger hash - * @param pgPool Link to postgres database - * @param ledgerIndex Ledger sequence - * @param app Application - * @return Hash of ledger - */ -uint256 -getHashByIndex( - std::shared_ptr const& pgPool, - std::uint32_t ledgerIndex, - Application& app); - -/** - * @brief getHashesByIndex Given a ledger sequence, - * return the ledger hash and the parent hash - * @param pgPool Link to postgres database - * @param ledgerIndex Ledger sequence - * @param[out] ledgerHash Hash of ledger - * @param[out] parentHash Hash of parent ledger - * @param app Application - * @return True if the data was found - */ -bool 
-getHashesByIndex( - std::shared_ptr const& pgPool, - std::uint32_t ledgerIndex, - uint256& ledgerHash, - uint256& parentHash, - Application& app); - -/** - * @brief getHashesByIndex Given a contiguous range of sequences, - * return a map of sequence -> (hash, parent hash) - * @param pgPool Link to postgres database - * @param minSeq Lower bound of range - * @param maxSeq Upper bound of range - * @param app Application - * @return Mapping of all found ledger sequences to their hash and parent hash - */ -std::map -getHashesByIndex( - std::shared_ptr const& pgPool, - std::uint32_t minSeq, - std::uint32_t maxSeq, - Application& app); - -/** - * @brief getTxHashes Returns vector of tx hashes by given ledger - * sequence - * @param pgPool Link to postgres database - * @param seq Ledger sequence - * @param app Application - * @return Vector of tx hashes - */ -std::vector -getTxHashes( - std::shared_ptr const& pgPool, - LedgerIndex seq, - Application& app); - -/** - * @brief locateTransaction Returns information used to locate - * a transaction. Function is specific to postgres backend. - * @param pgPool Link to postgres database - * @param id Hash of the transaction. - * @param app Application - * @return Information used to locate a transaction. Contains a nodestore - * hash and ledger sequence pair if the transaction was found. - * Otherwise, contains the range of ledgers present in the database - * at the time of search. - */ -Transaction::Locator -locateTransaction( - std::shared_ptr const& pgPool, - uint256 const& id, - Application& app); - -/** - * @brief getTxHistory Returns most recent 20 transactions starting - * from given number or entry. - * @param pgPool Link to postgres database - * @param startIndex First number of returned entry. - * @param app Application - * @param j Journal - * @return Vector of sharded pointers to transactions sorted in - * descending order by ledger sequence. - */ -std::vector> -getTxHistory( - std::shared_ptr const& pgPool, - LedgerIndex startIndex, - Application& app, - beast::Journal j); - -/** - * @brief getAccountTx Get last account transactions specifies by - * passed argumenrs structure. - * @param pgPool Link to postgres database - * @param args Arguments which specify account and whose tx to return. - * @param app Application - * @param j Journal - * @return Vector of account transactions and RPC status of responce. - */ -std::pair -getAccountTx( - std::shared_ptr const& pgPool, - AccountTxArgs const& args, - Application& app, - beast::Journal j); - -/** - * @brief writeLedgerAndTransactions Write new ledger and transaction - * data to Postgres. - * @param pgPool Pool of Postgres connections - * @param info Ledger info to write. - * @param accountTxData Transaction data to write - * @param j Journal (for logging) - * @return True if success, false if failure. - */ -bool -writeLedgerAndTransactions( - std::shared_ptr const& pgPool, - LedgerInfo const& info, - std::vector const& accountTxData, - beast::Journal& j); - -} // namespace ripple - -#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface_shards.h b/src/ripple/app/rdb/RelationalDBInterface_shards.h deleted file mode 100644 index 16ef67d210d..00000000000 --- a/src/ripple/app/rdb/RelationalDBInterface_shards.h +++ /dev/null @@ -1,257 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. 
- - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_SHARDS_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACE_SHARDS_H_INCLUDED - -#include -#include -#include -#include -#include - -namespace ripple { - -struct DatabasePair -{ - std::unique_ptr ledgerDb; - std::unique_ptr transactionDb; -}; - -/* Shard DB */ - -/** - * @brief makeMetaDBs Opens ledger and transaction 'meta' databases which - * map ledger hashes and transaction IDs to the index of the shard - * that holds the ledger or transaction. - * @param config Config object. - * @param setup Path to database and opening parameters. - * @param checkpointerSetup Database checkpointer setup. - * @return Struct DatabasePair which contains unique pointers to the ledger - * and transaction databases. - */ -DatabasePair -makeMetaDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - -/** - * @brief saveLedgerMeta Stores (transaction ID -> shard index) and - * (ledger hash -> shard index) mappings in the meta databases. - * @param ledger The ledger. - * @param app Application object. - * @param lgrMetaSession Session to ledger meta database. - * @param txnMetaSession Session to transaction meta database. - * @param shardIndex The index of the shard that contains this ledger. - * @return True on success. - */ -bool -saveLedgerMeta( - std::shared_ptr const& ledger, - Application& app, - soci::session& lgrMetaSession, - soci::session& txnMetaSession, - std::uint32_t shardIndex); - -/** - * @brief getShardIndexforLedger Queries the ledger meta database to - * retrieve the index of the shard that contains this ledger. - * @param session Session to the database. - * @param hash Hash of the ledger. - * @return The index of the shard on success, otherwise an unseated value. - */ -std::optional -getShardIndexforLedger(soci::session& session, LedgerHash const& hash); - -/** - * @brief getShardIndexforTransaction Queries the transaction meta database to - * retrieve the index of the shard that contains this transaction. - * @param session Session to the database. - * @param id ID of the transaction. - * @return The index of the shard on success, otherwise an unseated value. - */ -std::optional -getShardIndexforTransaction(soci::session& session, TxID const& id); - -/** - * @brief makeShardCompleteLedgerDBs Opens shard databases for already - * verified shard and returns its descriptors. - * @param config Config object. - * @param setup Path to database and other opening parameters. - * @return Pair of unique pointers to opened ledger and transaction databases. 
- */ -DatabasePair -makeShardCompleteLedgerDBs( - Config const& config, - DatabaseCon::Setup const& setup); - -/** - * @brief makeShardIncompleteLedgerDBs Opens shard databases for not - * fully downloaded or verified shard and returns its descriptors. - * @param config Config object. - * @param setup Path to database and other opening parameters. - * @param checkpointerSetup Checkpointer parameters. - * @return Pair of unique pointers to opened ledger and transaction databases. - */ -DatabasePair -makeShardIncompleteLedgerDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - -/** - * @brief updateLedgerDBs Save given ledger to shard databases. - * @param txdb Session with transaction DB. - * @param lgrdb Sessiob with ledger DB. - * @param ledger Ledger to save. - * @param index Index of the shard which the ledger belonfs to. - * @param stop Link to atomic flag which can stop the process if raised. - * @param j Journal - * @return True if ledger was successfully saved. - */ -bool -updateLedgerDBs( - soci::session& txdb, - soci::session& lgrdb, - std::shared_ptr const& ledger, - std::uint32_t index, - std::atomic& stop, - beast::Journal j); - -/* Shard acquire DB */ - -/** - * @brief makeAcquireDB Opens shard acquire DB and returns its descriptor. - * @param setup Path to DB and other opening parameters. - * @param checkpointerSetup Checkpointer parameters. - * @return Uniqye pointer to opened database. - */ -std::unique_ptr -makeAcquireDB( - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup); - -/** - * @brief insertAcquireDBIndex Adds new shard index to shard acquire DB. - * @param session Session with database. - * @param index Index to add. - */ -void -insertAcquireDBIndex(soci::session& session, std::uint32_t index); - -/** - * @brief selectAcquireDBLedgerSeqs Returns set of acquired ledgers for - * given shard. - * @param session Session with database. - * @param index Shard index. - * @return Pair which contains true if such as index found in database, - * and string which contains set of ledger sequences. - * If set of sequences was not saved than none is returned. - */ -std::pair> -selectAcquireDBLedgerSeqs(soci::session& session, std::uint32_t index); - -struct AcquireShardSeqsHash -{ - std::optional sequences; - std::optional hash; -}; - -/** - * @brief selectAcquireDBLedgerSeqsHash Returns set of acquired ledgers and - * hash for given shard. - * @param session Session with database. - * @param index Shard index. - * @return Pair which contains true of such an index found in database, - * and the AcquireShardSeqsHash structure which contains string - * with ledger sequences set and string with last ledger hash. - * If set of sequences or hash were not saved than none is returned. - */ -std::pair -selectAcquireDBLedgerSeqsHash(soci::session& session, std::uint32_t index); - -/** - * @brief updateAcquireDB Updates information in acquire DB. - * @param session Session with database. - * @param ledger Ledger to save into database. - * @param index Shard index. - * @param lastSeq Last acqyured ledger sequence. - * @param seqs Current set or acquired ledger sequences if it's not empty. - */ -void -updateAcquireDB( - soci::session& session, - std::shared_ptr const& ledger, - std::uint32_t index, - std::uint32_t lastSeq, - std::optional const& seqs); - -/* Archive DB */ - -/** - * @brief makeArchiveDB Opens shard archive DB and returns its descriptor. 
- * @param dir Path to database to open. - * @param dbName Name of database. - * @return Unique pointer to opened database. - */ -std::unique_ptr -makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName); - -/** - * @brief readArchiveDB Read entries from shard archive database and calls - * fiven callback for each entry. - * @param db Session with database. - * @param func Callback to call for each entry. - */ -void -readArchiveDB( - DatabaseCon& db, - std::function const& func); - -/** - * @brief insertArchiveDB Adds entry to shard archive database. - * @param db Session with database. - * @param shardIndex Shard index to add. - * @param url Shard download url to add. - */ -void -insertArchiveDB( - DatabaseCon& db, - std::uint32_t shardIndex, - std::string const& url); - -/** - * @brief deleteFromArchiveDB Deletes entry from shard archive DB. - * @param db Session with database. - * @param shardIndex Shard index to remove from DB. - */ -void -deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex); - -/** - * @brief dropArchiveDB Removes table in shard archive DB. - * @param db Session with database. - */ -void -dropArchiveDB(DatabaseCon& db); - -} // namespace ripple - -#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface.h b/src/ripple/app/rdb/RelationalDatabase.h similarity index 76% rename from src/ripple/app/rdb/RelationalDBInterface.h rename to src/ripple/app/rdb/RelationalDatabase.h index 759261832db..a269bf256c8 100644 --- a/src/ripple/app/rdb/RelationalDBInterface.h +++ b/src/ripple/app/rdb/RelationalDatabase.h @@ -17,8 +17,8 @@ */ //============================================================================== -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACE_H_INCLUDED +#ifndef RIPPLE_APP_RDB_RELATIONALDATABASE_H_INCLUDED +#define RIPPLE_APP_RDB_RELATIONALDATABASE_H_INCLUDED #include #include @@ -45,7 +45,7 @@ struct LedgerRange uint32_t max; }; -class RelationalDBInterface +class RelationalDatabase { public: struct CountMinMax @@ -135,56 +135,61 @@ class RelationalDBInterface }; /** - * @brief init Creates and returns appropriate interface based on config. + * @brief init Creates and returns an appropriate RelationalDatabase + * instance based on configuration. * @param app Application object. * @param config Config object. * @param jobQueue JobQueue object. * @return Unique pointer to the interface. */ - static std::unique_ptr + static std::unique_ptr init(Application& app, Config const& config, JobQueue& jobQueue); - virtual ~RelationalDBInterface() = default; + virtual ~RelationalDatabase() = default; /** - * @brief getMinLedgerSeq Returns minimum ledger sequence in Ledgers table. - * @return Ledger sequence or none if no ledgers exist. + * @brief getMinLedgerSeq Returns the minimum ledger sequence in the Ledgers + * table. + * @return Ledger sequence or no value if no ledgers exist. */ virtual std::optional getMinLedgerSeq() = 0; /** - * @brief getMaxLedgerSeq Returns maximum ledger sequence in Ledgers table. + * @brief getMaxLedgerSeq Returns the maximum ledger sequence in the Ledgers + * table. * @return Ledger sequence or none if no ledgers exist. */ virtual std::optional getMaxLedgerSeq() = 0; /** - * @brief getLedgerInfoByIndex Returns ledger by its sequence. + * @brief getLedgerInfoByIndex Returns a ledger by its sequence. * @param ledgerSeq Ledger sequence. - * @return Ledger or none if ledger not found. + * @return The ledger if found, otherwise no value. 
*/ virtual std::optional getLedgerInfoByIndex(LedgerIndex ledgerSeq) = 0; /** - * @brief getNewestLedgerInfo Returns info of newest saved ledger. - * @return Ledger info or none if ledger not found. + * @brief getNewestLedgerInfo Returns the info of the newest saved ledger. + * @return Ledger info if found, otherwise no value. */ virtual std::optional getNewestLedgerInfo() = 0; /** - * @brief getLedgerInfoByHash Returns info of ledger with given hash. + * @brief getLedgerInfoByHash Returns the info of the ledger with given + * hash. * @param ledgerHash Hash of the ledger. - * @return Ledger or none if ledger not found. + * @return Ledger if found, otherwise no value. */ virtual std::optional getLedgerInfoByHash(uint256 const& ledgerHash) = 0; /** - * @brief getHashByIndex Returns hash of ledger with given sequence. + * @brief getHashByIndex Returns the hash of the ledger with the given + * sequence. * @param ledgerIndex Ledger sequence. * @return Hash of the ledger. */ @@ -192,39 +197,40 @@ class RelationalDBInterface getHashByIndex(LedgerIndex ledgerIndex) = 0; /** - * @brief getHashesByIndex Returns hash of the ledger and hash of parent - * ledger for the ledger of given sequence. + * @brief getHashesByIndex Returns the hashes of the ledger and its parent + * as specified by the ledgerIndex. * @param ledgerIndex Ledger sequence. - * @return Struct LedgerHashPair which contain hashes of the ledger and - * its parent ledger. + * @return Struct LedgerHashPair which contains hashes of the ledger and + * its parent. */ virtual std::optional getHashesByIndex(LedgerIndex ledgerIndex) = 0; /** - * @brief getHashesByIndex Returns hash of the ledger and hash of parent - * ledger for all ledgers with sequences from given minimum limit - * to given maximum limit. + * @brief getHashesByIndex Returns hashes of each ledger and its parent for + * all ledgers within the provided range. * @param minSeq Minimum ledger sequence. * @param maxSeq Maximum ledger sequence. - * @return Map which points sequence number of found ledger to the struct - * LedgerHashPair which contains ledger hash and its parent hash. + * @return Container that maps the sequence number of a found ledger to the + * struct LedgerHashPair which contains the hashes of the ledger and + * its parent. */ virtual std::map getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) = 0; /** - * @brief getTxHistory Returns most recent 20 transactions starting from - * given number or entry. + * @brief getTxHistory Returns the 20 most recent transactions starting from + * the given number. * @param startIndex First number of returned entry. - * @return Vector of sharded pointers to transactions sorted in + * @return Vector of shared pointers to transactions sorted in * descending order by ledger sequence. */ virtual std::vector> getTxHistory(LedgerIndex startIndex) = 0; /** - * @brief ledgerDbHasSpace Checks if ledger database has available space. + * @brief ledgerDbHasSpace Checks if the ledger database has available + * space. * @param config Config object. * @return True if space is available. */ @@ -232,7 +238,7 @@ class RelationalDBInterface ledgerDbHasSpace(Config const& config) = 0; /** - * @brief transactionDbHasSpace Checks if transaction database has + * @brief transactionDbHasSpace Checks if the transaction database has * available space. * @param config Config object. * @return True if space is available. 
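To make the renamed interface above more concrete, here is a minimal, illustrative sketch (not part of the diff) of how calling code might consume the virtual methods declared in `RelationalDatabase.h`. It assumes the optional-returning signatures documented above (`getMinLedgerSeq`, `getMaxLedgerSeq`, `getLedgerInfoByIndex`) and that `LedgerInfo` exposes a `seq` member; the function name `reportLedgerRange` and the `std::cout` logging are purely hypothetical.

```
// Sketch only: exercises the abstract RelationalDatabase API described above.
// Assumes rippled's headers are on the include path; reportLedgerRange and the
// std::cout logging are hypothetical and not part of this change.
#include <ripple/app/rdb/RelationalDatabase.h>

#include <iostream>

namespace ripple {

void
reportLedgerRange(RelationalDatabase& db)
{
    // Both accessors return an empty optional when the Ledgers table is empty.
    auto const minSeq = db.getMinLedgerSeq();
    auto const maxSeq = db.getMaxLedgerSeq();
    if (!minSeq || !maxSeq)
    {
        std::cout << "no validated ledgers stored yet\n";
        return;
    }

    std::cout << "ledgers " << *minSeq << " through " << *maxSeq
              << " are available\n";

    // Per-ledger lookups are also optional-returning: an unseated value means
    // the requested ledger is not present in the database.
    if (auto const info = db.getLedgerInfoByIndex(*maxSeq))
        std::cout << "newest stored ledger sequence: " << info->seq << "\n";
}

}  // namespace ripple
```

Because these methods are pure virtual, a caller written this way works unchanged whichever backend `RelationalDatabase::init` selected from the `[relational_db]` configuration.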
diff --git a/src/ripple/app/rdb/ShardArchive.h b/src/ripple/app/rdb/ShardArchive.h new file mode 100644 index 00000000000..20c4382b056 --- /dev/null +++ b/src/ripple/app/rdb/ShardArchive.h @@ -0,0 +1,78 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_SHARDARCHIVE_H_INCLUDED +#define RIPPLE_APP_RDB_SHARDARCHIVE_H_INCLUDED + +#include +#include + +namespace ripple { + +/** + * @brief makeArchiveDB Opens the shard archive database and returns its + * descriptor. + * @param dir Path to the database to open. + * @param dbName Name of the database. + * @return Unique pointer to the opened database. + */ +std::unique_ptr +makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName); + +/** + * @brief readArchiveDB Reads entries from the shard archive database and + * invokes the given callback for each entry. + * @param db Session with the database. + * @param func Callback to invoke for each entry. + */ +void +readArchiveDB( + DatabaseCon& db, + std::function const& func); + +/** + * @brief insertArchiveDB Adds an entry to the shard archive database. + * @param db Session with the database. + * @param shardIndex Shard index to add. + * @param url Shard download url to add. + */ +void +insertArchiveDB( + DatabaseCon& db, + std::uint32_t shardIndex, + std::string const& url); + +/** + * @brief deleteFromArchiveDB Deletes an entry from the shard archive database. + * @param db Session with the database. + * @param shardIndex Shard index to remove from the database. + */ +void +deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex); + +/** + * @brief dropArchiveDB Removes a table in the shard archive database. + * @param db Session with the database. + */ +void +dropArchiveDB(DatabaseCon& db); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/State.h b/src/ripple/app/rdb/State.h new file mode 100644 index 00000000000..fe74d5f19d3 --- /dev/null +++ b/src/ripple/app/rdb/State.h @@ -0,0 +1,98 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_STATE_H_INCLUDED +#define RIPPLE_APP_RDB_STATE_H_INCLUDED + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { + +struct SavedState +{ + std::string writableDb; + std::string archiveDb; + LedgerIndex lastRotated; +}; + +/** + * @brief initStateDB Opens a session with the State database. + * @param session Provides a session with the database. + * @param config Path to the database and other opening parameters. + * @param dbName Name of the database. + */ +void +initStateDB( + soci::session& session, + BasicConfig const& config, + std::string const& dbName); + +/** + * @brief getCanDelete Returns the ledger sequence which can be deleted. + * @param session Session with the database. + * @return Ledger sequence. + */ +LedgerIndex +getCanDelete(soci::session& session); + +/** + * @brief setCanDelete Updates the ledger sequence which can be deleted. + * @param session Session with the database. + * @param canDelete Ledger sequence to save. + * @return Previous value of the ledger sequence which can be deleted. + */ +LedgerIndex +setCanDelete(soci::session& session, LedgerIndex canDelete); + +/** + * @brief getSavedState Returns the saved state. + * @param session Session with the database. + * @return The SavedState structure which contains the names of the writable + * database, the archive database and the last rotated ledger sequence. + */ +SavedState +getSavedState(soci::session& session); + +/** + * @brief setSavedState Saves the given state. + * @param session Session with the database. + * @param state The SavedState structure which contains the names of the + * writable database, the archive database and the last rotated ledger + * sequence. + */ +void +setSavedState(soci::session& session, SavedState const& state); + +/** + * @brief setLastRotated Updates the last rotated ledger sequence. + * @param session Session with the database. + * @param seq New value of the last rotated ledger sequence. + */ +void +setLastRotated(soci::session& session, LedgerIndex seq); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/UnitaryShard.h b/src/ripple/app/rdb/UnitaryShard.h new file mode 100644 index 00000000000..d2ac773dbd3 --- /dev/null +++ b/src/ripple/app/rdb/UnitaryShard.h @@ -0,0 +1,155 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_UNITARYSHARD_H_INCLUDED +#define RIPPLE_APP_RDB_UNITARYSHARD_H_INCLUDED + +#include +#include +#include +#include +#include + +namespace ripple { + +struct DatabasePair +{ + std::unique_ptr ledgerDb; + std::unique_ptr transactionDb; +}; + +/** + * @brief makeShardCompleteLedgerDBs Opens shard databases for verified shards + * and returns their descriptors. + * @param config Config object. + * @param setup Path to the databases and other opening parameters. + * @return Pair of unique pointers to the opened ledger and transaction + * databases. + */ +DatabasePair +makeShardCompleteLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup); + +/** + * @brief makeShardIncompleteLedgerDBs Opens shard databases for partially + * downloaded or unverified shards and returns their descriptors. + * @param config Config object. + * @param setup Path to the databases and other opening parameters. + * @param checkpointerSetup Checkpointer parameters. + * @return Pair of unique pointers to the opened ledger and transaction + * databases. + */ +DatabasePair +makeShardIncompleteLedgerDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup); + +/** + * @brief updateLedgerDBs Saves the given ledger to shard databases. + * @param txdb Session with the transaction databases. + * @param lgrdb Session with the ledger databases. + * @param ledger Ledger to save. + * @param index Index of the shard that owns the ledger. + * @param stop Reference to an atomic flag that can stop the process if raised. + * @param j Journal + * @return True if the ledger was successfully saved. + */ +bool +updateLedgerDBs( + soci::session& txdb, + soci::session& lgrdb, + std::shared_ptr const& ledger, + std::uint32_t index, + std::atomic& stop, + beast::Journal j); + +/** + * @brief makeAcquireDB Opens the shard acquire database and returns its + * descriptor. + * @param setup Path to the database and other opening parameters. + * @param checkpointerSetup Checkpointer parameters. + * @return Unique pointer to the opened database. + */ +std::unique_ptr +makeAcquireDB( + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup); + +/** + * @brief insertAcquireDBIndex Adds a new shard index to the shard acquire + * database. + * @param session Session with the database. + * @param index Index to add. + */ +void +insertAcquireDBIndex(soci::session& session, std::uint32_t index); + +/** + * @brief selectAcquireDBLedgerSeqs Returns the set of acquired ledgers for + * the given shard. + * @param session Session with the database. + * @param index Shard index. + * @return Pair which contains true if such an index was found in the database, + * and a string which contains the set of ledger sequences. + * If no sequences were saved then the optional will have no value. 
+ */ +std::pair> +selectAcquireDBLedgerSeqs(soci::session& session, std::uint32_t index); + +struct AcquireShardSeqsHash +{ + std::optional sequences; + std::optional hash; +}; + +/** + * @brief selectAcquireDBLedgerSeqsHash Returns the set of acquired ledger + * sequences and the last ledger hash for the shard with the provided + * index. + * @param session Session with the database. + * @param index Shard index. + * @return Pair which contains true if such an index was found in the database + * and the AcquireShardSeqsHash structure which contains a string with + * the ledger sequences and a string with last ledger hash. If the set + * of sequences or hash were not saved then no value is returned. + */ +std::pair +selectAcquireDBLedgerSeqsHash(soci::session& session, std::uint32_t index); + +/** + * @brief updateAcquireDB Updates information in the acquire DB. + * @param session Session with the database. + * @param ledger Ledger to save into the database. + * @param index Shard index. + * @param lastSeq Last acquired ledger sequence. + * @param seqs Current set of acquired ledger sequences if it's not empty. + */ +void +updateAcquireDB( + soci::session& session, + std::shared_ptr const& ledger, + std::uint32_t index, + std::uint32_t lastSeq, + std::optional const& seqs); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/Vacuum.h b/src/ripple/app/rdb/Vacuum.h new file mode 100644 index 00000000000..3db18da045a --- /dev/null +++ b/src/ripple/app/rdb/Vacuum.h @@ -0,0 +1,37 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_VACUUM_H_INCLUDED +#define RIPPLE_APP_RDB_VACUUM_H_INCLUDED + +#include + +namespace ripple { + +/** + * @brief doVacuumDB Creates, initialises, and performs cleanup on a database. + * @param setup Path to the database and other opening parameters. + * @return True if the vacuum process completed successfully. + */ +bool +doVacuumDB(DatabaseCon::Setup const& setup); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/Wallet.h b/src/ripple/app/rdb/Wallet.h new file mode 100644 index 00000000000..6bf6ca9ea83 --- /dev/null +++ b/src/ripple/app/rdb/Wallet.h @@ -0,0 +1,170 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. 
+ + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_WALLET_H_INCLUDED +#define RIPPLE_APP_RDB_WALLET_H_INCLUDED + +#include +#include +#include +#include +#include +#include + +namespace ripple { + +/** + * @brief makeWalletDB Opens the wallet database and returns it. + * @param setup Path to the database and other opening parameters. + * @return Unique pointer to the database descriptor. + */ +std::unique_ptr +makeWalletDB(DatabaseCon::Setup const& setup); + +/** + * @brief makeTestWalletDB Opens a test wallet database with an arbitrary name. + * @param setup Path to the database and other opening parameters. + * @param dbname Name of the database. + * @return Unique pointer to the database descriptor. + */ +std::unique_ptr +makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname); + +/** + * @brief getManifests Loads a manifest from the wallet database and stores it + * in the cache. + * @param session Session with the database. + * @param dbTable Name of the database table from which the manifest will be + * extracted. + * @param mCache Cache for storing the manifest. + * @param j Journal. + */ +void +getManifests( + soci::session& session, + std::string const& dbTable, + ManifestCache& mCache, + beast::Journal j); + +/** + * @brief saveManifests Saves all given manifests to the database. + * @param session Session with the database. + * @param dbTable Name of the database table that will store the manifest. + * @param isTrusted Callback that returns true if the key is trusted. + * @param map Maps public keys to manifests. + * @param j Journal. + */ +void +saveManifests( + soci::session& session, + std::string const& dbTable, + std::function const& isTrusted, + hash_map const& map, + beast::Journal j); + +/** + * @brief addValidatorManifest Saves the manifest of a validator to the + * database. + * @param session Session with the database. + * @param serialized Manifest of the validator in raw format. + */ +void +addValidatorManifest(soci::session& session, std::string const& serialized); + +/** + * @brief getNodeIdentity Returns the public and private keys of this node. + * @param session Session with the database. + * @return Pair of public and private keys. + */ +std::pair +getNodeIdentity(soci::session& session); + +/** + * @brief getPeerReservationTable Returns the peer reservation table. + * @param session Session with the database. + * @param j Journal. + * @return Peer reservation hash table. + */ +std::unordered_set, KeyEqual> +getPeerReservationTable(soci::session& session, beast::Journal j); + +/** + * @brief insertPeerReservation Adds an entry to the peer reservation table. + * @param session Session with the database. + * @param nodeId Public key of the node. + * @param description Description of the node. 
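+ *
+ *        A minimal sketch of a call (the `session` and `peerPublicKey` objects
+ *        are assumptions, not part of this interface):
+ * @code
+ * insertPeerReservation(session, peerPublicKey, "reserved slot for a backup peer");
+ * @endcode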
+ */ +void +insertPeerReservation( + soci::session& session, + PublicKey const& nodeId, + std::string const& description); + +/** + * @brief deletePeerReservation Deletes an entry from the peer reservation + * table. + * @param session Session with the database. + * @param nodeId Public key of the node to remove. + */ +void +deletePeerReservation(soci::session& session, PublicKey const& nodeId); + +/** + * @brief createFeatureVotes Creates the FeatureVote table if it does not exist. + * @param session Session with the wallet database. + * @return true if the table already exists + */ +bool +createFeatureVotes(soci::session& session); + +// For historical reasons the up-vote and down-vote integer representations +// are unintuitive. +enum class AmendmentVote : int { up = 0, down = 1 }; + +/** + * @brief readAmendments Reads all amendments from the FeatureVotes table. + * @param session Session with the wallet database. + * @param callback Callback called for each amendment with its hash, name and + * optionally a flag denoting whether the amendment should be vetoed. + */ +void +readAmendments( + soci::session& session, + std::function amendment_hash, + boost::optional amendment_name, + boost::optional vote)> const& callback); + +/** + * @brief voteAmendment Set the veto value for a particular amendment. + * @param session Session with the wallet database. + * @param amendment Hash of the amendment. + * @param name Name of the amendment. + * @param vote Whether to vote in favor of this amendment. + */ +void +voteAmendment( + soci::session& session, + uint256 const& amendment, + std::string const& name, + AmendmentVote vote); + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.h b/src/ripple/app/rdb/backend/PostgresDatabase.h similarity index 53% rename from src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.h rename to src/ripple/app/rdb/backend/PostgresDatabase.h index 7149f475fb6..e8673611279 100644 --- a/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.h +++ b/src/ripple/app/rdb/backend/PostgresDatabase.h @@ -17,55 +17,47 @@ */ //============================================================================== -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACEPOSTGRES_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACEPOSTGRES_H_INCLUDED +#ifndef RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED +#define RIPPLE_APP_RDB_BACKEND_POSTGRESDATABASE_H_INCLUDED -#include +#include namespace ripple { -class RelationalDBInterfacePostgres : public RelationalDBInterface +class PostgresDatabase : public RelationalDatabase { public: - /** There is only one implementation of this interface: - * RelationalDBInterfacePostgresImp. It wraps a stoppable object (PgPool) - * that does not follow RAII, and it does not go through the effort of - * following RAII either. The owner of the only object of that type - * (ApplicationImp) holds it by the type of its interface instead of its - * implementation, and thus the lifetime management methods need to be - * part of the interface. - */ virtual void stop() = 0; /** - * @brief sweep Sweep the database. Method is specific for postgres backend. + * @brief sweep Sweeps the database. */ virtual void sweep() = 0; /** - * @brief getCompleteLedgers Returns string which contains list of - * completed ledgers. Method is specific for postgres backend. - * @return String with completed ledger numbers + * @brief getCompleteLedgers Returns a string which contains a list of + * completed ledgers. 
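+ *
+ *        For illustration only (the `db` reference and journal `j` are
+ *        assumptions), a caller might log the result as:
+ * @code
+ * JLOG(j.info()) << "complete ledgers: " << db.getCompleteLedgers();
+ * @endcode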
+ * @return String with completed ledger sequences */ virtual std::string getCompleteLedgers() = 0; /** - * @brief getValidatedLedgerAge Returns age of last - * validated ledger. Method is specific for postgres backend. - * @return Age of last validated ledger in seconds + * @brief getValidatedLedgerAge Returns the age of the last validated + * ledger. + * @return Age of the last validated ledger in seconds */ virtual std::chrono::seconds getValidatedLedgerAge() = 0; /** - * @brief writeLedgerAndTransactions Write new ledger and transaction data - * into database. Method is specific for Postgres backend. + * @brief writeLedgerAndTransactions Writes new ledger and transaction data + * into the database. * @param info Ledger info to write. * @param accountTxData Transaction data to write - * @return True if success, false if failure. + * @return True on success, false on failure. */ virtual bool writeLedgerAndTransactions( @@ -73,32 +65,30 @@ class RelationalDBInterfacePostgres : public RelationalDBInterface std::vector const& accountTxData) = 0; /** - * @brief getTxHashes Returns vector of tx hashes by given ledger - * sequence. Method is specific to postgres backend. + * @brief getTxHashes Returns a vector of the hashes of transactions + * belonging to the ledger with the provided sequence. * @param seq Ledger sequence - * @return Vector of tx hashes + * @return Vector of transaction hashes */ virtual std::vector getTxHashes(LedgerIndex seq) = 0; /** - * @brief getAccountTx Get last account transactions specifies by - * passed argumenrs structure. Function if specific to postgres - * backend. - * @param args Arguments which specify account and whose tx to return. - * @param app Application - * @param j Journal - * @return Vector of account transactions and RPC status of responce. + * @brief getAccountTx Get the last account transactions specified by the + * AccountTxArgs struct. + * @param args Arguments which specify the account and which transactions to + * return. + * @return Vector of account transactions and the RPC status response. */ virtual std::pair getAccountTx(AccountTxArgs const& args) = 0; /** * @brief locateTransaction Returns information used to locate - * a transaction. Function is specific to postgres backend. + * a transaction. * @param id Hash of the transaction. * @return Information used to locate a transaction. Contains a nodestore - * hash and ledger sequence pair if the transaction was found. + * hash and a ledger sequence pair if the transaction was found. * Otherwise, contains the range of ledgers present in the database * at the time of search. */ @@ -110,9 +100,9 @@ class RelationalDBInterfacePostgres : public RelationalDBInterface * network * @param[out] reason if the database is not caught up, reason contains a * helpful message describing why - * @return false if the most recently written - * ledger has a close time over 3 minutes ago, or if there are - * no ledgers in the database. true otherwise + * @return false if the most recently written ledger has a close time + * over 3 minutes ago, or if there are no ledgers in the + * database. 
true otherwise */ virtual bool isCaughtUp(std::string& reason) = 0; diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp b/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp deleted file mode 100644 index 8b9a962106e..00000000000 --- a/src/ripple/app/rdb/backend/RelationalDBInterfacePostgres.cpp +++ /dev/null @@ -1,298 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -class RelationalDBInterfacePostgresImp : public RelationalDBInterfacePostgres -{ -public: - RelationalDBInterfacePostgresImp( - Application& app, - Config const& config, - JobQueue& jobQueue) - : app_(app) - , j_(app_.journal("PgPool")) - , pgPool_( -#ifdef RIPPLED_REPORTING - make_PgPool(config.section("ledger_tx_tables"), j_) -#endif - ) - { - assert(config.reporting()); -#ifdef RIPPLED_REPORTING - if (config.reporting() && !config.reportingReadOnly()) // use pg - { - initSchema(pgPool_); - } -#endif - } - - void - stop() override - { -#ifdef RIPPLED_REPORTING - pgPool_->stop(); -#endif - } - - void - sweep() override; - - std::optional - getMinLedgerSeq() override; - - std::optional - getMaxLedgerSeq() override; - - std::string - getCompleteLedgers() override; - - std::chrono::seconds - getValidatedLedgerAge() override; - - bool - writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) override; - - std::optional - getLedgerInfoByIndex(LedgerIndex ledgerSeq) override; - - std::optional - getNewestLedgerInfo() override; - - std::optional - getLedgerInfoByHash(uint256 const& ledgerHash) override; - - uint256 - getHashByIndex(LedgerIndex ledgerIndex) override; - - std::optional - getHashesByIndex(LedgerIndex ledgerIndex) override; - - std::map - getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override; - - std::vector - getTxHashes(LedgerIndex seq) override; - - std::vector> - getTxHistory(LedgerIndex startIndex) override; - - std::pair - getAccountTx(AccountTxArgs const& args) override; - - Transaction::Locator - locateTransaction(uint256 const& id) override; - - bool - ledgerDbHasSpace(Config const& config) override; - - bool - transactionDbHasSpace(Config const& config) override; - - bool - isCaughtUp(std::string& reason) override; - -private: - Application& app_; - beast::Journal j_; - std::shared_ptr pgPool_; - - bool - dbHasSpace(Config const& config); -}; - -void 
-RelationalDBInterfacePostgresImp::sweep() -{ -#ifdef RIPPLED_REPORTING - pgPool_->idleSweeper(); -#endif -} - -std::optional -RelationalDBInterfacePostgresImp::getMinLedgerSeq() -{ - return ripple::getMinLedgerSeq(pgPool_, j_); -} - -std::optional -RelationalDBInterfacePostgresImp::getMaxLedgerSeq() -{ - return ripple::getMaxLedgerSeq(pgPool_); -} - -std::string -RelationalDBInterfacePostgresImp::getCompleteLedgers() -{ - return ripple::getCompleteLedgers(pgPool_); -} - -std::chrono::seconds -RelationalDBInterfacePostgresImp::getValidatedLedgerAge() -{ - return ripple::getValidatedLedgerAge(pgPool_, j_); -} - -bool -RelationalDBInterfacePostgresImp::writeLedgerAndTransactions( - LedgerInfo const& info, - std::vector const& accountTxData) -{ - return ripple::writeLedgerAndTransactions(pgPool_, info, accountTxData, j_); -} - -std::optional -RelationalDBInterfacePostgresImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) -{ - return ripple::getLedgerInfoByIndex(pgPool_, ledgerSeq, app_); -} - -std::optional -RelationalDBInterfacePostgresImp::getNewestLedgerInfo() -{ - return ripple::getNewestLedgerInfo(pgPool_, app_); -} - -std::optional -RelationalDBInterfacePostgresImp::getLedgerInfoByHash(uint256 const& ledgerHash) -{ - return ripple::getLedgerInfoByHash(pgPool_, ledgerHash, app_); -} - -uint256 -RelationalDBInterfacePostgresImp::getHashByIndex(LedgerIndex ledgerIndex) -{ - return ripple::getHashByIndex(pgPool_, ledgerIndex, app_); -} - -std::optional -RelationalDBInterfacePostgresImp::getHashesByIndex(LedgerIndex ledgerIndex) -{ - LedgerHashPair p; - if (!ripple::getHashesByIndex( - pgPool_, ledgerIndex, p.ledgerHash, p.parentHash, app_)) - return {}; - return p; -} - -std::map -RelationalDBInterfacePostgresImp::getHashesByIndex( - LedgerIndex minSeq, - LedgerIndex maxSeq) -{ - return ripple::getHashesByIndex(pgPool_, minSeq, maxSeq, app_); -} - -std::vector -RelationalDBInterfacePostgresImp::getTxHashes(LedgerIndex seq) -{ - return ripple::getTxHashes(pgPool_, seq, app_); -} - -std::vector> -RelationalDBInterfacePostgresImp::getTxHistory(LedgerIndex startIndex) -{ - return ripple::getTxHistory(pgPool_, startIndex, app_, j_); -} - -std::pair -RelationalDBInterfacePostgresImp::getAccountTx(AccountTxArgs const& args) -{ - return ripple::getAccountTx(pgPool_, args, app_, j_); -} - -Transaction::Locator -RelationalDBInterfacePostgresImp::locateTransaction(uint256 const& id) -{ - return ripple::locateTransaction(pgPool_, id, app_); -} - -bool -RelationalDBInterfacePostgresImp::dbHasSpace(Config const& config) -{ - /* Postgres server could be running on a different machine. 
*/ - - return true; -} - -bool -RelationalDBInterfacePostgresImp::ledgerDbHasSpace(Config const& config) -{ - return dbHasSpace(config); -} - -bool -RelationalDBInterfacePostgresImp::transactionDbHasSpace(Config const& config) -{ - return dbHasSpace(config); -} - -std::unique_ptr -getRelationalDBInterfacePostgres( - Application& app, - Config const& config, - JobQueue& jobQueue) -{ - return std::make_unique( - app, config, jobQueue); -} -bool -RelationalDBInterfacePostgresImp::isCaughtUp(std::string& reason) -{ -#ifdef RIPPLED_REPORTING - using namespace std::chrono_literals; - auto age = PgQuery(pgPool_)("SELECT age()"); - if (!age || age.isNull()) - { - reason = "No ledgers in database"; - return false; - } - if (std::chrono::seconds{age.asInt()} > 3min) - { - reason = "No recently-published ledger"; - return false; - } -#endif - return true; -} - -} // namespace ripple diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h b/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h deleted file mode 100644 index 085f59628d1..00000000000 --- a/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.h +++ /dev/null @@ -1,302 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. -*/ -//============================================================================== - -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACESQLITE_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACESQLITE_H_INCLUDED - -#include - -namespace ripple { - -class RelationalDBInterfaceSqlite : public RelationalDBInterface -{ -public: - /** - * @brief getTransactionsMinLedgerSeq Returns minimum ledger sequence - * among records in the Transactions table. - * @return Ledger sequence or none if no ledgers exist. - */ - virtual std::optional - getTransactionsMinLedgerSeq() = 0; - - /** - * @brief getAccountTransactionsMinLedgerSeq Returns minimum ledger - * sequence among records in the AccountTransactions table. - * @return Ledger sequence or none if no ledgers exist. - */ - virtual std::optional - getAccountTransactionsMinLedgerSeq() = 0; - - /** - * @brief deleteTransactionByLedgerSeq Deletes transactions from ledger - * with given sequence. - * @param ledgerSeq Ledger sequence. - */ - virtual void - deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) = 0; - - /** - * @brief deleteBeforeLedgerSeq Deletes all ledgers with given sequence - * and all sequences below it. - * @param ledgerSeq Ledger sequence. - */ - virtual void - deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; - - /** - * @brief deleteTransactionsBeforeLedgerSeq Deletes all transactions with - * given ledger sequence and all sequences below it. - * @param ledgerSeq Ledger sequence. 
- */ - virtual void - deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; - - /** - * @brief deleteAccountTransactionsBeforeLedgerSeq Deletes all account - * transactions with given ledger sequence and all sequences - * below it. - * @param ledgerSeq Ledger sequence. - */ - virtual void - deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; - - /** - * @brief getTransactionCount Returns number of transactions. - * @return Number of transactions. - */ - virtual std::size_t - getTransactionCount() = 0; - - /** - * @brief getAccountTransactionCount Returns number of account - * transactions. - * @return Number of account transactions. - */ - virtual std::size_t - getAccountTransactionCount() = 0; - - /** - * @brief getLedgerCountMinMax Returns minumum ledger sequence, - * maximum ledger sequence and total number of saved ledgers. - * @return Struct CountMinMax which contain minimum sequence, - * maximum sequence and number of ledgers. - */ - virtual struct CountMinMax - getLedgerCountMinMax() = 0; - - /** - * @brief saveValidatedLedger Saves ledger into database. - * @param ledger The ledger. - * @param current True if ledger is current. - * @return True is saving was successfull. - */ - virtual bool - saveValidatedLedger( - std::shared_ptr const& ledger, - bool current) = 0; - - /** - * @brief getLimitedOldestLedgerInfo Returns info of oldest ledger - * from ledgers with sequences greater or equal to given. - * @param ledgerFirstIndex Minimum ledger sequence. - * @return Ledger info or none if ledger not found. - */ - virtual std::optional - getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) = 0; - - /** - * @brief getLimitedNewestLedgerInfo Returns info of newest ledger - * from ledgers with sequences greater or equal to given. - * @param ledgerFirstIndex Minimum ledger sequence. - * @return Ledger info or none if ledger not found. - */ - virtual std::optional - getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) = 0; - - /** - * @brief getOldestAccountTxs Returns oldest transactions for given - * account which match given criteria starting from given offset. - * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. - * @return Vector of pairs of found transactions and their metadata - * sorted in ascending order by account sequence. - */ - virtual AccountTxs - getOldestAccountTxs(AccountTxOptions const& options) = 0; - - /** - * @brief getNewestAccountTxs Returns newest transactions for given - * account which match given criteria starting from given offset. - * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. - * @return Vector of pairs of found transactions and their metadata - * sorted in descending order by account sequence. - */ - virtual AccountTxs - getNewestAccountTxs(AccountTxOptions const& options) = 0; - - /** - * @brief getOldestAccountTxsB Returns oldest transactions in binary form - * for given account which match given criteria starting from given - * offset. 
- * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. - * @return Vector of tuples of found transactions, their metadata and - * account sequences sorted in ascending order by account sequence. - */ - virtual MetaTxsList - getOldestAccountTxsB(AccountTxOptions const& options) = 0; - - /** - * @brief getNewestAccountTxsB Returns newest transactions in binary form - * for given account which match given criteria starting from given - * offset. - * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. - * @return Vector of tuples of found transactions, their metadata and - * account sequences sorted in descending order by account - * sequence. - */ - virtual MetaTxsList - getNewestAccountTxsB(AccountTxOptions const& options) = 0; - - /** - * @brief oldestAccountTxPage Returns oldest transactions for given - * account which match given criteria starting from given marker. - * @param options Struct AccountTxPageOptions which contain criteria to - * match: the account, minimum and maximum ledger numbers to search, - * marker of first returned entry, number of transactions to return, - * flag if this number unlimited. - * @return Vector of pairs of found transactions and their metadata - * sorted in ascending order by account sequence and marker - * for next search if search not finished. - */ - virtual std::pair> - oldestAccountTxPage(AccountTxPageOptions const& options) = 0; - - /** - * @brief newestAccountTxPage Returns newest transactions for given - * account which match given criteria starting from given marker. - * @param options Struct AccountTxPageOptions which contain criteria to - * match: the account, minimum and maximum ledger numbers to search, - * marker of first returned entry, number of transactions to return, - * flag if this number unlimited. - * @return Vector of pairs of found transactions and their metadata - * sorted in descending order by account sequence and marker - * for next search if search not finished. - */ - virtual std::pair> - newestAccountTxPage(AccountTxPageOptions const& options) = 0; - - /** - * @brief oldestAccountTxPageB Returns oldest transactions in binary form - * for given account which match given criteria starting from given - * marker. - * @param options Struct AccountTxPageOptions which contain criteria to - * match: the account, minimum and maximum ledger numbers to search, - * marker of first returned entry, number of transactions to return, - * flag if this number unlimited. - * @return Vector of tuples of found transactions, their metadata and - * account sequences sorted in ascending order by account - * sequence and marker for next search if search not finished. - */ - virtual std::pair> - oldestAccountTxPageB(AccountTxPageOptions const& options) = 0; - - /** - * @brief newestAccountTxPageB Returns newest transactions in binary form - * for given account which match given criteria starting from given - * marker. - * @param options Struct AccountTxPageOptions which contain criteria to - * match: the account, minimum and maximum ledger numbers to search, - * marker of first returned entry, number of transactions to return, - * flag if this number unlimited. 
- * @return Vector of tuples of found transactions, their metadata and - * account sequences sorted in descending order by account - * sequence and marker for next search if search not finished. - */ - virtual std::pair> - newestAccountTxPageB(AccountTxPageOptions const& options) = 0; - - /** - * @brief getTransaction Returns transaction with given hash. If not found - * and range given then check if all ledgers from the range are - * present in the database. - * @param id Hash of the transaction. - * @param range Range of ledgers to check, if present. - * @param ec Default value of error code. - * @return Transaction and its metadata if found, TxSearched::all if range - * given and all ledgers from range are present in the database, - * TxSearched::some if range given and not all ledgers are present, - * TxSearched::unknown if range not given or deserializing error - * occured. In the last case error code returned via ec link - * parameter, in other cases default error code not changed. - */ - virtual std::variant - getTransaction( - uint256 const& id, - std::optional> const& range, - error_code_i& ec) = 0; - - /** - * @brief getKBUsedAll Returns space used by all databases. - * @return Space in kilobytes. - */ - virtual uint32_t - getKBUsedAll() = 0; - - /** - * @brief getKBUsedLedger Returns space used by ledger database. - * @return Space in kilobytes. - */ - virtual uint32_t - getKBUsedLedger() = 0; - - /** - * @brief getKBUsedTransaction Returns space used by transaction - * database. - * @return Space in kilobytes. - */ - virtual uint32_t - getKBUsedTransaction() = 0; - - /** - * @brief Closes the ledger database - */ - virtual void - closeLedgerDB() = 0; - - /** - * @brief Closes the transaction database - */ - virtual void - closeTransactionDB() = 0; -}; - -} // namespace ripple - -#endif diff --git a/src/ripple/app/rdb/backend/SQLiteDatabase.h b/src/ripple/app/rdb/backend/SQLiteDatabase.h new file mode 100644 index 00000000000..07f9be3e228 --- /dev/null +++ b/src/ripple/app/rdb/backend/SQLiteDatabase.h @@ -0,0 +1,313 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_BACKEND_SQLITEDATABASE_H_INCLUDED +#define RIPPLE_APP_RDB_BACKEND_SQLITEDATABASE_H_INCLUDED + +#include + +namespace ripple { + +class SQLiteDatabase : public RelationalDatabase +{ +public: + /** + * @brief getTransactionsMinLedgerSeq Returns the minimum ledger sequence + * stored in the Transactions table. + * @return Ledger sequence or no value if no ledgers exist. 
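+ *
+ *        Sketch of a possible use (the `db` reference and journal `j` are
+ *        assumptions):
+ * @code
+ * if (auto const seq = db.getTransactionsMinLedgerSeq())
+ *     JLOG(j.info()) << "earliest stored transaction ledger: " << *seq;
+ * @endcode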
+ */ + virtual std::optional + getTransactionsMinLedgerSeq() = 0; + + /** + * @brief getAccountTransactionsMinLedgerSeq Returns the minimum ledger + * sequence stored in the AccountTransactions table. + * @return Ledger sequence or no value if no ledgers exist. + */ + virtual std::optional + getAccountTransactionsMinLedgerSeq() = 0; + + /** + * @brief deleteTransactionByLedgerSeq Deletes transactions from the ledger + * with the given sequence. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief deleteBeforeLedgerSeq Deletes all ledgers with a sequence number + * less than or equal to the given ledger sequence. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief deleteTransactionsBeforeLedgerSeq Deletes all transactions with + * a sequence number less than or equal to the given ledger + * sequence. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief deleteAccountTransactionsBeforeLedgerSeq Deletes all account + * transactions with a sequence number less than or equal to the + * given ledger sequence. + * @param ledgerSeq Ledger sequence. + */ + virtual void + deleteAccountTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) = 0; + + /** + * @brief getTransactionCount Returns the number of transactions. + * @return Number of transactions. + */ + virtual std::size_t + getTransactionCount() = 0; + + /** + * @brief getAccountTransactionCount Returns the number of account + * transactions. + * @return Number of account transactions. + */ + virtual std::size_t + getAccountTransactionCount() = 0; + + /** + * @brief getLedgerCountMinMax Returns the minimum ledger sequence, + * maximum ledger sequence and total number of saved ledgers. + * @return Struct CountMinMax which contains the minimum sequence, + * maximum sequence and number of ledgers. + */ + virtual struct CountMinMax + getLedgerCountMinMax() = 0; + + /** + * @brief saveValidatedLedger Saves a ledger into the database. + * @param ledger The ledger. + * @param current True if the ledger is current. + * @return True if saving was successful. + */ + virtual bool + saveValidatedLedger( + std::shared_ptr const& ledger, + bool current) = 0; + + /** + * @brief getLimitedOldestLedgerInfo Returns the info of the oldest ledger + * whose sequence number is greater than or equal to the given + * sequence number. + * @param ledgerFirstIndex Minimum ledger sequence. + * @return Ledger info if found, otherwise no value. + */ + virtual std::optional + getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) = 0; + + /** + * @brief getLimitedNewestLedgerInfo Returns the info of the newest ledger + * whose sequence number is greater than or equal to the given + * sequence number. + * @param ledgerFirstIndex Minimum ledger sequence. + * @return Ledger info if found, otherwise no value. + */ + virtual std::optional + getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) = 0; + + /** + * @brief getOldestAccountTxs Returns the oldest transactions for the + * account that matches the given criteria starting from the provided + * offset. + * @param options Struct AccountTxOptions which contains the criteria to + * match: the account, ledger search range, the offset of the first + * entry to return, the number of transactions to return, a flag if + * this number is unlimited. 
+ * @return Vector of pairs of found transactions and their metadata + * sorted in ascending order by account sequence. + */ + virtual AccountTxs + getOldestAccountTxs(AccountTxOptions const& options) = 0; + + /** + * @brief getNewestAccountTxs Returns the newest transactions for the + * account that matches the given criteria starting from the provided + * offset. + * @param options Struct AccountTxOptions which contains the criteria to + * match: the account, the ledger search range, the offset of the + * first entry to return, the number of transactions to return, a + * flag if this number unlimited. + * @return Vector of pairs of found transactions and their metadata + * sorted in descending order by account sequence. + */ + virtual AccountTxs + getNewestAccountTxs(AccountTxOptions const& options) = 0; + + /** + * @brief getOldestAccountTxsB Returns the oldest transactions in binary + * form for the account that matches the given criteria starting from + * the provided offset. + * @param options Struct AccountTxOptions which contains the criteria to + * match: the account, the ledger search range, the offset of the + * first entry to return, the number of transactions to return, a + * flag if this number unlimited. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in ascending order by account sequence. + */ + virtual MetaTxsList + getOldestAccountTxsB(AccountTxOptions const& options) = 0; + + /** + * @brief getNewestAccountTxsB Returns the newest transactions in binary + * form for the account that matches the given criteria starting from + * the provided offset. + * @param options Struct AccountTxOptions which contains the criteria to + * match: the account, the ledger search range, the offset of the + * first entry to return, the number of transactions to return, a + * flag if this number is unlimited. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in descending order by account + * sequence. + */ + virtual MetaTxsList + getNewestAccountTxsB(AccountTxOptions const& options) = 0; + + /** + * @brief oldestAccountTxPage Returns the oldest transactions for the + * account that matches the given criteria starting from the + * provided marker. + * @param options Struct AccountTxPageOptions which contains the criteria to + * match: the account, the ledger search range, the marker of first + * returned entry, the number of transactions to return, a flag if + * this number is unlimited. + * @return Vector of pairs of found transactions and their metadata + * sorted in ascending order by account sequence and a marker + * for the next search if the search was not finished. + */ + virtual std::pair> + oldestAccountTxPage(AccountTxPageOptions const& options) = 0; + + /** + * @brief newestAccountTxPage Returns the newest transactions for the + * account that matches the given criteria starting from the provided + * marker. + * @param options Struct AccountTxPageOptions which contains the criteria to + * match: the account, the ledger search range, the marker of the + * first returned entry, the number of transactions to return, a flag + * if this number unlimited. + * @return Vector of pairs of found transactions and their metadata + * sorted in descending order by account sequence and a marker + * for the next search if the search was not finished. 
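+ *
+ *        Illustrative handling of the result (the `db`, `options` and
+ *        `process` names are assumptions):
+ * @code
+ * auto const [page, marker] = db.newestAccountTxPage(options);
+ * for (auto const& [txn, meta] : page)
+ *     process(txn, meta);  // hypothetical per-transaction handler
+ * // a seated `marker` means more pages remain for a follow-up query
+ * @endcode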
+ */ + virtual std::pair> + newestAccountTxPage(AccountTxPageOptions const& options) = 0; + + /** + * @brief oldestAccountTxPageB Returns the oldest transactions in binary + * form for the account that matches the given criteria starting from + * the provided marker. + * @param options Struct AccountTxPageOptions which contains criteria to + * match: the account, the ledger search range, the marker of the + * first returned entry, the number of transactions to return, a flag + * if this number unlimited. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in ascending order by account + * sequence and a marker for the next search if the search was not + * finished. + */ + virtual std::pair> + oldestAccountTxPageB(AccountTxPageOptions const& options) = 0; + + /** + * @brief newestAccountTxPageB Returns the newest transactions in binary + * form for the account that matches the given criteria starting from + * the provided marker. + * @param options Struct AccountTxPageOptions which contains the criteria to + * match: the account, the ledger search range, the marker of the + * first returned entry, the number of transactions to return, a flag + * if this number is unlimited. + * @return Vector of tuples of found transactions, their metadata and + * account sequences sorted in descending order by account + * sequence and a marker for the next search if the search was not + * finished. + */ + virtual std::pair> + newestAccountTxPageB(AccountTxPageOptions const& options) = 0; + + /** + * @brief getTransaction Returns the transaction with the given hash. If a + * range is provided but the transaction is not found, then check if + * all ledgers in the range are present in the database. + * @param id Hash of the transaction. + * @param range Range of ledgers to check, if present. + * @param ec Default error code value. + * @return Transaction and its metadata if found, otherwise TxSearched::all + * if a range is provided and all ledgers from the range are present + * in the database, TxSearched::some if a range is provided and not + * all ledgers are present, TxSearched::unknown if the range is not + * provided or a deserializing error occurred. In the last case the + * error code is returned via the ec parameter, in other cases the + * default error code is not changed. + */ + virtual std::variant + getTransaction( + uint256 const& id, + std::optional> const& range, + error_code_i& ec) = 0; + + /** + * @brief getKBUsedAll Returns the amount of space used by all databases. + * @return Space in kilobytes. + */ + virtual uint32_t + getKBUsedAll() = 0; + + /** + * @brief getKBUsedLedger Returns the amount of space space used by the + * ledger database. + * @return Space in kilobytes. + */ + virtual uint32_t + getKBUsedLedger() = 0; + + /** + * @brief getKBUsedTransaction Returns the amount of space used by the + * transaction database. + * @return Space in kilobytes. 
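+ *
+ *        For illustration (assumes a SQLiteDatabase reference `db` and a
+ *        journal `j`):
+ * @code
+ * JLOG(j.debug()) << "ledger db uses " << db.getKBUsedLedger() << " KB of "
+ *                 << db.getKBUsedAll() << " KB total";
+ * @endcode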
+ */ + virtual uint32_t + getKBUsedTransaction() = 0; + + /** + * @brief Closes the ledger database + */ + virtual void + closeLedgerDB() = 0; + + /** + * @brief Closes the transaction database + */ + virtual void + closeTransactionDB() = 0; +}; + +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/RelationalDBInterface_nodes.h b/src/ripple/app/rdb/backend/detail/Node.h similarity index 94% rename from src/ripple/app/rdb/RelationalDBInterface_nodes.h rename to src/ripple/app/rdb/backend/detail/Node.h index 338e94fb2a7..fa7e39c8329 100644 --- a/src/ripple/app/rdb/RelationalDBInterface_nodes.h +++ b/src/ripple/app/rdb/backend/detail/Node.h @@ -17,18 +17,19 @@ */ //============================================================================== -#ifndef RIPPLE_CORE_RELATIONALDBINTERFACE_NODES_H_INCLUDED -#define RIPPLE_CORE_RELATIONALDBINTERFACE_NODES_H_INCLUDED +#ifndef RIPPLE_APP_RDB_BACKEND_DETAIL_NODE_H_INCLUDED +#define RIPPLE_APP_RDB_BACKEND_DETAIL_NODE_H_INCLUDED #include #include -#include +#include #include #include #include #include namespace ripple { +namespace detail { /* Need to change TableTypeCount if TableType is modified. */ enum class TableType { Ledgers, Transactions, AccountTransactions }; @@ -116,7 +117,7 @@ getRows(soci::session& session, TableType type); * @return Struct CountMinMax which contain minimum sequence, * maximum sequence and number of rows. */ -RelationalDBInterface::CountMinMax +RelationalDatabase::CountMinMax getRowsMinMax(soci::session& session, TableType type); /** @@ -232,7 +233,7 @@ getHashesByIndex( * @param maxSeq Maximum ledger sequence. * @param j Journal. * @return Map which points sequence number of found ledger to the struct - * LedgerHashPair which contauns ledger hash and its parent hash. + * LedgerHashPair which contains ledger hash and its parent hash. */ std::map getHashesByIndex( @@ -283,12 +284,12 @@ getTxHistory( * skipped. We need to skip some quantity of transactions if option * offset is > 0 in the options structure. */ -std::pair +std::pair getOldestAccountTxs( soci::session& session, Application& app, LedgerMaster& ledgerMaster, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j); @@ -314,12 +315,12 @@ getOldestAccountTxs( * skipped. We need to skip some quantity of transactions if option * offset is > 0 in the options structure. */ -std::pair +std::pair getNewestAccountTxs( soci::session& session, Application& app, LedgerMaster& ledgerMaster, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j); @@ -345,11 +346,11 @@ getNewestAccountTxs( * skipped. We need to skip some quantity of transactions if option * offset is > 0 in the options structure. */ -std::pair, int> +std::pair, int> getOldestAccountTxsB( soci::session& session, Application& app, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j); @@ -375,11 +376,11 @@ getOldestAccountTxsB( * skipped. We need to skip some quantity of transactions if option * offset is > 0 in the options structure. 
*/ -std::pair, int> +std::pair, int> getNewestAccountTxsB( soci::session& session, Application& app, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j); @@ -404,7 +405,7 @@ getNewestAccountTxsB( * sequence and marker for next search if search not finished. * Also number of transactions processed during this call. */ -std::pair, int> +std::pair, int> oldestAccountTxPage( soci::session& session, AccountIDCache const& idCache, @@ -412,7 +413,7 @@ oldestAccountTxPage( std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, - RelationalDBInterface::AccountTxPageOptions const& options, + RelationalDatabase::AccountTxPageOptions const& options, int limit_used, std::uint32_t page_length); @@ -437,7 +438,7 @@ oldestAccountTxPage( * sequence and marker for next search if search not finished. * Also number of transactions processed during this call. */ -std::pair, int> +std::pair, int> newestAccountTxPage( soci::session& session, AccountIDCache const& idCache, @@ -445,7 +446,7 @@ newestAccountTxPage( std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, - RelationalDBInterface::AccountTxPageOptions const& options, + RelationalDatabase::AccountTxPageOptions const& options, int limit_used, std::uint32_t page_length); @@ -465,7 +466,7 @@ newestAccountTxPage( * occured. In the last case error code modified in ec link * parameter, in other cases default error code remained. */ -std::variant +std::variant getTransaction( soci::session& session, Application& app, @@ -483,6 +484,7 @@ getTransaction( bool dbHasSpace(soci::session& session, Config const& config, beast::Journal j); +} // namespace detail } // namespace ripple #endif diff --git a/src/ripple/app/rdb/backend/detail/Shard.h b/src/ripple/app/rdb/backend/detail/Shard.h new file mode 100644 index 00000000000..ac88c24bd78 --- /dev/null +++ b/src/ripple/app/rdb/backend/detail/Shard.h @@ -0,0 +1,90 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_APP_RDB_BACKEND_DETAIL_SHARD_H_INCLUDED +#define RIPPLE_APP_RDB_BACKEND_DETAIL_SHARD_H_INCLUDED + +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace detail { + +/** + * @brief makeMetaDBs Opens ledger and transaction 'meta' databases which + * map ledger hashes and transaction IDs to the index of the shard + * that holds the ledger or transaction. + * @param config Config object. 
+ * @param setup Path to database and opening parameters. + * @param checkpointerSetup Database checkpointer setup. + * @return Struct DatabasePair which contains unique pointers to the ledger + * and transaction databases. + */ +DatabasePair +makeMetaDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup); + +/** + * @brief saveLedgerMeta Stores (transaction ID -> shard index) and + * (ledger hash -> shard index) mappings in the meta databases. + * @param ledger The ledger. + * @param app Application object. + * @param lgrMetaSession Session to ledger meta database. + * @param txnMetaSession Session to transaction meta database. + * @param shardIndex The index of the shard that contains this ledger. + * @return True on success. + */ +bool +saveLedgerMeta( + std::shared_ptr const& ledger, + Application& app, + soci::session& lgrMetaSession, + soci::session& txnMetaSession, + std::uint32_t shardIndex); + +/** + * @brief getShardIndexforLedger Queries the ledger meta database to + * retrieve the index of the shard that contains this ledger. + * @param session Session to the database. + * @param hash Hash of the ledger. + * @return The index of the shard on success, otherwise an unseated value. + */ +std::optional +getShardIndexforLedger(soci::session& session, LedgerHash const& hash); + +/** + * @brief getShardIndexforTransaction Queries the transaction meta database to + * retrieve the index of the shard that contains this transaction. + * @param session Session to the database. + * @param id ID of the transaction. + * @return The index of the shard on success, otherwise an unseated value. + */ +std::optional +getShardIndexforTransaction(soci::session& session, TxID const& id); + +} // namespace detail +} // namespace ripple + +#endif diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp b/src/ripple/app/rdb/backend/detail/impl/Node.cpp similarity index 86% rename from src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp rename to src/ripple/app/rdb/backend/detail/impl/Node.cpp index c067bfe0cd0..0c9f3b0171f 100644 --- a/src/ripple/app/rdb/impl/RelationalDBInterface_nodes.cpp +++ b/src/ripple/app/rdb/backend/detail/impl/Node.cpp @@ -23,8 +23,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -35,10 +35,11 @@ #include namespace ripple { +namespace detail { /** - * @brief to_string Returns name of table by table ID. - * @param type Table ID. + * @brief to_string Returns the name of a table according to its TableType. + * @param type An enum denoting the table's type. * @return Name of the table. 
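+ *
+ * For example, per the switch below:
+ * @code
+ * assert(to_string(TableType::AccountTransactions) == "AccountTransactions");
+ * @endcode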
*/ static std::string @@ -47,6 +48,7 @@ to_string(TableType type) static_assert( TableTypeCount == 3, "Need to modify switch statement if enum is modified"); + switch (type) { case TableType::Ledgers: @@ -56,7 +58,7 @@ to_string(TableType type) case TableType::AccountTransactions: return "AccountTransactions"; default: - assert(0); + assert(false); return "Unknown"; } } @@ -166,10 +168,10 @@ getRows(soci::session& session, TableType type) return rows; } -RelationalDBInterface::CountMinMax +RelationalDatabase::CountMinMax getRowsMinMax(soci::session& session, TableType type) { - RelationalDBInterface::CountMinMax res; + RelationalDatabase::CountMinMax res; session << "SELECT COUNT(*) AS rows, " "MIN(LedgerSeq) AS first, " "MAX(LedgerSeq) AS last " @@ -378,12 +380,12 @@ saveValidatedLedger( } /** - * @brief getLedgerInfo Returns info of ledger with special condition - * given as SQL query. - * @param session Session with database. - * @param sqlSuffix Special condition for found the ledger. + * @brief getLedgerInfo Returns the info of the ledger retrieved from the + * database by using the provided SQL query suffix. + * @param session Session with the database. + * @param sqlSuffix SQL string used to specify the sought ledger. * @param j Journal. - * @return Ledger info or none if ledger not found. + * @return Ledger info or no value if the ledger was not found. */ static std::optional getLedgerInfo( @@ -674,21 +676,22 @@ getTxHistory( } /** - * @brief transactionsSQL Returns SQL query to select oldest or newest - * transactions in decoded or binary form for given account which - * match given criteria starting from given offset. + * @brief transactionsSQL Returns a SQL query for selecting the oldest or newest + * transactions in decoded or binary form for the account that matches + * the given criteria starting from the provided offset. * @param app Application object. - * @param selection List of table fields to select from database. - * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. + * @param selection List of table fields to select from the database. + * @param options Struct AccountTxOptions which contains the criteria to match: + * the account, the ledger search range, the offset of the first entry to + * return, the number of transactions to return, and a flag if this + * number is unlimited. * @param limit_used Number of transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. + * to other shard databases, if shard databases are used. + * No value if the node database is used. * @param descending True for descending order, false for ascending. * @param binary True for binary form, false for decoded. - * @param count True for count number of transaction, false for select it. + * @param count True for counting the number of transactions, false for + * selecting them. * @param j Journal. * @return SQL query string. 
*/ @@ -696,7 +699,7 @@ static std::string transactionsSQL( Application& app, std::string selection, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, bool descending, bool binary, @@ -781,39 +784,40 @@ transactionsSQL( } /** - * @brief getAccountTxs Returns oldest or newest transactions for given - * account which match given criteria starting from given offset. - * @param session Session with database. + * @brief getAccountTxs Returns the oldest or newest transactions for the + * account that matches the given criteria starting from the provided + * offset. + * @param session Session with the database. * @param app Application object. * @param ledgerMaster LedgerMaster object. - * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. + * @param options Struct AccountTxOptions which contains the criteria to match: + * the account, the ledger search range, the offset of the first entry to + * return, the number of transactions to return, and a flag if this + * number is unlimited. * @param limit_used Number of transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. + * to other shard databases, if shard databases are used. + * No value if the node database is used. * @param descending True for descending order, false for ascending. * @param j Journal. - * @return Vector of pairs of found transactions and its metadata - * sorted in given order by account sequence. - * Also the number of transactions processed or skipped. - * If this number is >= 0, then it means number of transactions - * processed, if it is < 0, then -number means number of transactions - * skipped. We need to skip some quantity of transactions if option - * offset is > 0 in the options structure. + * @return Vector of pairs of found transactions and their metadata sorted by + * account sequence in the specified order along with the number of + * transactions processed or skipped. If this number is >= 0, then it + * represents the number of transactions processed, if it is < 0, then + * -number represents the number of transactions skipped. We need to + * skip some number of transactions if option offset is > 0 in the + * options structure. 
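+ *
+ *        Illustrative call (argument names mirror the parameters above; the
+ *        surrounding objects are assumptions):
+ * @code
+ * auto const [txs, count] = getAccountTxs(
+ *     session, app, ledgerMaster, options, std::nullopt, /*descending*/ false, j);
+ * @endcode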
*/ -static std::pair +static std::pair getAccountTxs( soci::session& session, Application& app, LedgerMaster& ledgerMaster, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, bool descending, beast::Journal j) { - RelationalDBInterface::AccountTxs ret; + RelationalDatabase::AccountTxs ret; std::string sql = transactionsSQL( app, @@ -883,7 +887,7 @@ getAccountTxs( if (!total && limit_used) { - RelationalDBInterface::AccountTxOptions opt = options; + RelationalDatabase::AccountTxOptions opt = options; opt.offset = 0; std::string sql1 = transactionsSQL( app, "COUNT(*)", opt, limit_used, descending, false, false, j); @@ -897,12 +901,12 @@ getAccountTxs( return {ret, total}; } -std::pair +std::pair getOldestAccountTxs( soci::session& session, Application& app, LedgerMaster& ledgerMaster, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j) { @@ -910,12 +914,12 @@ getOldestAccountTxs( session, app, ledgerMaster, options, limit_used, false, j); } -std::pair +std::pair getNewestAccountTxs( soci::session& session, Application& app, LedgerMaster& ledgerMaster, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j) { @@ -924,38 +928,38 @@ getNewestAccountTxs( } /** - * @brief getAccountTxsB Returns oldest or newset transactions in binary - * form for given account which match given criteria starting from - * given offset. - * @param session Session with database. + * @brief getAccountTxsB Returns the oldest or newest transactions in binary + * form for the account that matches given criteria starting from + * the provided offset. + * @param session Session with the database. * @param app Application object. - * @param options Struct AccountTxOptions which contain criteria to match: - * the account, minimum and maximum ledger numbers to search, - * offset of first entry to return, number of transactions to return, - * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases, if shard databases are used. - * None if node database is used. + * @param options Struct AccountTxOptions which contains the criteria to match: + * the account, the ledger search range, the offset of the first entry to + * return, the number of transactions to return, and a flag if this + * number is unlimited. + * @param limit_used Number of transactions already returned in calls to other + * shard databases, if shard databases are used. No value if the node + * database is used. * @param descending True for descending order, false for ascending. * @param j Journal. - * @return Vector of tuples of found transactions, its metadata and - * account sequences sorted in given order by account - * sequence. Also number of transactions processed or skipped. - * If this number is >= 0, then it means number of transactions - * processed, if it is < 0, then -number means number of transactions - * skipped. We need to skip some quantity of transactions if option - * offset is > 0 in the options structure. + * @return Vector of tuples each containing (the found transactions, their + * metadata, and their account sequences) sorted by account sequence in + * the specified order along with the number of transactions processed + * or skipped. 
If this number is >= 0, then it represents the number of + * transactions processed, if it is < 0, then -number represents the + * number of transactions skipped. We need to skip some number of + * transactions if option offset is > 0 in the options structure. */ -static std::pair, int> +static std::pair, int> getAccountTxsB( soci::session& session, Application& app, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, bool descending, beast::Journal j) { - std::vector ret; + std::vector ret; std::string sql = transactionsSQL( app, @@ -1004,7 +1008,7 @@ getAccountTxsB( if (!total && limit_used) { - RelationalDBInterface::AccountTxOptions opt = options; + RelationalDatabase::AccountTxOptions opt = options; opt.offset = 0; std::string sql1 = transactionsSQL( app, "COUNT(*)", opt, limit_used, descending, true, false, j); @@ -1018,22 +1022,22 @@ getAccountTxsB( return {ret, total}; } -std::pair, int> +std::pair, int> getOldestAccountTxsB( soci::session& session, Application& app, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j) { return getAccountTxsB(session, app, options, limit_used, false, j); } -std::pair, int> +std::pair, int> getNewestAccountTxsB( soci::session& session, Application& app, - RelationalDBInterface::AccountTxOptions const& options, + RelationalDatabase::AccountTxOptions const& options, std::optional const& limit_used, beast::Journal j) { @@ -1041,28 +1045,28 @@ getNewestAccountTxsB( } /** - * @brief accountTxPage Searches oldest or newest transactions for given - * account which match given criteria starting from given marker - * and calls callback for each found transaction. - * @param session Session with database. + * @brief accountTxPage Searches for the oldest or newest transactions for the + * account that matches the given criteria starting from the provided + * marker and invokes the callback parameter for each found transaction. + * @param session Session with the database. * @param idCache Account ID cache. * @param onUnsavedLedger Callback function to call on each found unsaved - * ledger within given range. - * @param onTransaction Callback function to call on eahc found transaction. - * @param options Struct AccountTxPageOptions which contain criteria to - * match: the account, minimum and maximum ledger numbers to search, - * marker of first returned entry, number of transactions to return, - * flag if this number unlimited. - * @param limit_used Number or transactions already returned in calls - * to another shard databases. + * ledger within the given range. + * @param onTransaction Callback function to call on each found transaction. + * @param options Struct AccountTxPageOptions which contains the criteria to + * match: the account, the ledger search range, the marker of the first + * returned entry, the number of transactions to return, and a flag if + * this number unlimited. + * @param limit_used Number of transactions already returned in calls + * to other shard databases. * @param page_length Total number of transactions to return. * @param forward True for ascending order, false for descending. - * @return Vector of tuples of found transactions, its metadata and - * account sequences sorted in given order by account - * sequence and marker for next search if search not finished. - * Also number of transactions processed during this call. 
+ * @return Vector of tuples of found transactions, their metadata and account + * sequences sorted in the specified order by account sequence, a marker + * for the next search if the search was not finished and the number of + * transactions processed during this call. */ -static std::pair, int> +static std::pair, int> accountTxPage( soci::session& session, AccountIDCache const& idCache, @@ -1070,7 +1074,7 @@ accountTxPage( std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, - RelationalDBInterface::AccountTxPageOptions const& options, + RelationalDatabase::AccountTxPageOptions const& options, int limit_used, std::uint32_t page_length, bool forward) @@ -1105,7 +1109,7 @@ accountTxPage( findSeq = options.marker->txnSeq; } - std::optional newmarker; + std::optional newmarker; if (limit_used > 0) newmarker = options.marker; @@ -1243,7 +1247,7 @@ accountTxPage( return {newmarker, total}; } -std::pair, int> +std::pair, int> oldestAccountTxPage( soci::session& session, AccountIDCache const& idCache, @@ -1251,7 +1255,7 @@ oldestAccountTxPage( std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, - RelationalDBInterface::AccountTxPageOptions const& options, + RelationalDatabase::AccountTxPageOptions const& options, int limit_used, std::uint32_t page_length) { @@ -1266,7 +1270,7 @@ oldestAccountTxPage( true); } -std::pair, int> +std::pair, int> newestAccountTxPage( soci::session& session, AccountIDCache const& idCache, @@ -1274,7 +1278,7 @@ newestAccountTxPage( std::function< void(std::uint32_t, std::string const&, Blob&&, Blob&&)> const& onTransaction, - RelationalDBInterface::AccountTxPageOptions const& options, + RelationalDatabase::AccountTxPageOptions const& options, int limit_used, std::uint32_t page_length) { @@ -1289,7 +1293,7 @@ newestAccountTxPage( false); } -std::variant +std::variant getTransaction( soci::session& session, Application& app, @@ -1435,4 +1439,5 @@ dbHasSpace(soci::session& session, Config const& config, beast::Journal j) return true; } +} // namespace detail } // namespace ripple diff --git a/src/ripple/app/rdb/backend/detail/impl/Shard.cpp b/src/ripple/app/rdb/backend/detail/impl/Shard.cpp new file mode 100644 index 00000000000..f7a0ce4571b --- /dev/null +++ b/src/ripple/app/rdb/backend/detail/impl/Shard.cpp @@ -0,0 +1,147 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2020 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include +#include +#include +#include +#include +#include +#include + +namespace ripple { +namespace detail { + +DatabasePair +makeMetaDBs( + Config const& config, + DatabaseCon::Setup const& setup, + DatabaseCon::CheckpointerSetup const& checkpointerSetup) +{ + // ledger meta database + auto lgrMetaDB{std::make_unique( + setup, + LgrMetaDBName, + LgrMetaDBPragma, + LgrMetaDBInit, + checkpointerSetup)}; + + if (!config.useTxTables()) + return {std::move(lgrMetaDB), nullptr}; + + // transaction meta database + auto txMetaDB{std::make_unique( + setup, TxMetaDBName, TxMetaDBPragma, TxMetaDBInit, checkpointerSetup)}; + + return {std::move(lgrMetaDB), std::move(txMetaDB)}; +} + +bool +saveLedgerMeta( + std::shared_ptr const& ledger, + Application& app, + soci::session& lgrMetaSession, + soci::session& txnMetaSession, + std::uint32_t const shardIndex) +{ + std::string_view constexpr lgrSQL = + R"sql(INSERT OR REPLACE INTO LedgerMeta VALUES + (:ledgerHash,:shardIndex);)sql"; + + auto const hash = to_string(ledger->info().hash); + lgrMetaSession << lgrSQL, soci::use(hash), soci::use(shardIndex); + + if (!app.config().useTxTables()) + return true; + + auto const aLedger = [&app, ledger]() -> std::shared_ptr { + try + { + auto aLedger = + app.getAcceptedLedgerCache().fetch(ledger->info().hash); + if (!aLedger) + { + aLedger = std::make_shared(ledger, app); + app.getAcceptedLedgerCache().canonicalize_replace_client( + ledger->info().hash, aLedger); + } + + return aLedger; + } + catch (std::exception const&) + { + JLOG(app.journal("Ledger").warn()) + << "An accepted ledger was missing nodes"; + } + + return {}; + }(); + + if (!aLedger) + return false; + + soci::transaction tr(txnMetaSession); + + for (auto const& acceptedLedgerTx : *aLedger) + { + std::string_view constexpr txnSQL = + R"sql(INSERT OR REPLACE INTO TransactionMeta VALUES + (:transactionID,:shardIndex);)sql"; + + auto const transactionID = + to_string(acceptedLedgerTx->getTransactionID()); + + txnMetaSession << txnSQL, soci::use(transactionID), + soci::use(shardIndex); + } + + tr.commit(); + return true; +} + +std::optional +getShardIndexforLedger(soci::session& session, LedgerHash const& hash) +{ + std::uint32_t shardIndex; + session << "SELECT ShardIndex FROM LedgerMeta WHERE LedgerHash = '" << hash + << "';", + soci::into(shardIndex); + + if (!session.got_data()) + return std::nullopt; + + return shardIndex; +} + +std::optional +getShardIndexforTransaction(soci::session& session, TxID const& id) +{ + std::uint32_t shardIndex; + session << "SELECT ShardIndex FROM TransactionMeta WHERE TransID = '" << id + << "';", + soci::into(shardIndex); + + if (!session.got_data()) + return std::nullopt; + + return shardIndex; +} + +} // namespace detail +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp b/src/ripple/app/rdb/backend/impl/PostgresDatabase.cpp similarity index 71% rename from src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp rename to src/ripple/app/rdb/backend/impl/PostgresDatabase.cpp index d242c8b4bd4..5ee4ce5519d 100644 --- a/src/ripple/app/rdb/impl/RelationalDBInterface_postgres.cpp +++ b/src/ripple/app/rdb/backend/impl/PostgresDatabase.cpp @@ -1,7 +1,7 @@ //------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2021 Ripple Labs Inc. + Copyright (c) 2020 Ripple Labs Inc. 
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -17,73 +17,137 @@ */ //============================================================================== -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include -#include +#include +#include +#include +#include +#include namespace ripple { -using TxnsData = RelationalDBInterface::AccountTxs; -using TxnsDataBinary = RelationalDBInterface::MetaTxsList; +class PgPool; -using LedgerHash = RelationalDBInterface::LedgerHash; -using LedgerSequence = RelationalDBInterface::LedgerSequence; -using LedgerShortcut = RelationalDBInterface::LedgerShortcut; +using AccountTxResult = RelationalDatabase::AccountTxResult; +using TxnsData = RelationalDatabase::AccountTxs; +using TxnsDataBinary = RelationalDatabase::MetaTxsList; -std::optional -getMinLedgerSeq(std::shared_ptr const& pgPool, beast::Journal j) +class PostgresDatabaseImp final : public PostgresDatabase { +public: + PostgresDatabaseImp( + Application& app, + Config const& config, + JobQueue& jobQueue) + : app_(app) + , j_(app_.journal("PgPool")) + , pgPool_( #ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool)("SELECT min_ledger()"); - if (!seq) - { - JLOG(j.error()) << "Error querying minimum ledger sequence."; - } - else if (!seq.isNull()) - return seq.asInt(); + make_PgPool(config.section("ledger_tx_tables"), j_) #endif - return {}; -} - -std::optional -getMaxLedgerSeq(std::shared_ptr const& pgPool) -{ + ) + { + assert(config.reporting()); #ifdef RIPPLED_REPORTING - auto seq = PgQuery(pgPool)("SELECT max_ledger()"); - if (seq && !seq.isNull()) - return seq.asBigInt(); + if (config.reporting() && !config.reportingReadOnly()) // use pg + { + initSchema(pgPool_); + } #endif - return {}; -} + } -std::string -getCompleteLedgers(std::shared_ptr const& pgPool) -{ + void + stop() override + { #ifdef RIPPLED_REPORTING - auto range = PgQuery(pgPool)("SELECT complete_ledgers()"); - if (range) - return range.c_str(); + pgPool_->stop(); #endif - return "error"; -} + } -std::chrono::seconds -getValidatedLedgerAge(std::shared_ptr const& pgPool, beast::Journal j) -{ - using namespace std::chrono_literals; -#ifdef RIPPLED_REPORTING - auto age = PgQuery(pgPool)("SELECT age()"); - if (!age || age.isNull()) - JLOG(j.debug()) << "No ledgers in database"; - else - return std::chrono::seconds{age.asInt()}; -#endif - return weeks{2}; -} + void + sweep() override; + + std::optional + getMinLedgerSeq() override; + + std::optional + getMaxLedgerSeq() override; + + std::string + getCompleteLedgers() override; + + std::chrono::seconds + getValidatedLedgerAge() override; + + bool + writeLedgerAndTransactions( + LedgerInfo const& info, + std::vector const& accountTxData) override; + + std::optional + getLedgerInfoByIndex(LedgerIndex ledgerSeq) override; + + std::optional + getNewestLedgerInfo() override; + + std::optional + getLedgerInfoByHash(uint256 const& ledgerHash) override; + + uint256 + getHashByIndex(LedgerIndex ledgerIndex) override; + + std::optional + getHashesByIndex(LedgerIndex ledgerIndex) override; + + std::map + getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) override; + + std::vector + getTxHashes(LedgerIndex seq) override; + + std::vector> + getTxHistory(LedgerIndex startIndex) override; + + std::pair + getAccountTx(AccountTxArgs const& args) override; + + Transaction::Locator + locateTransaction(uint256 
const& id) override; + + bool + ledgerDbHasSpace(Config const& config) override; + + bool + transactionDbHasSpace(Config const& config) override; + + bool + isCaughtUp(std::string& reason) override; + +private: + Application& app_; + beast::Journal j_; + std::shared_ptr pgPool_; + + bool + dbHasSpace(Config const& config); +}; /** - * @brief loadLedgerInfos Load the ledger info for the specified + * @brief loadLedgerInfos Loads the ledger info for the specified * ledger/s from the database * @param pgPool Link to postgres database * @param whichLedger Specifies the ledger to load via ledger sequence, @@ -254,152 +318,30 @@ loadLedgerHelper( return infos[0]; } -std::optional -getNewestLedgerInfo(std::shared_ptr const& pgPool, Application& app) -{ - return loadLedgerHelper(pgPool, {}, app); -} - -std::optional -getLedgerInfoByIndex( - std::shared_ptr const& pgPool, - std::uint32_t ledgerIndex, - Application& app) -{ - return loadLedgerHelper(pgPool, uint32_t{ledgerIndex}, app); -} - -std::optional -getLedgerInfoByHash( - std::shared_ptr const& pgPool, - uint256 const& ledgerHash, - Application& app) -{ - return loadLedgerHelper(pgPool, uint256{ledgerHash}, app); -} - -uint256 -getHashByIndex( - std::shared_ptr const& pgPool, - std::uint32_t ledgerIndex, - Application& app) -{ - auto infos = loadLedgerInfos(pgPool, ledgerIndex, app); - assert(infos.size() <= 1); - if (infos.size()) - return infos[0].hash; - return {}; -} - -bool -getHashesByIndex( - std::shared_ptr const& pgPool, - std::uint32_t ledgerIndex, - uint256& ledgerHash, - uint256& parentHash, - Application& app) -{ - auto infos = loadLedgerInfos(pgPool, ledgerIndex, app); - assert(infos.size() <= 1); - if (infos.size()) - { - ledgerHash = infos[0].hash; - parentHash = infos[0].parentHash; - return true; - } - return false; -} - -std::map -getHashesByIndex( - std::shared_ptr const& pgPool, - std::uint32_t minSeq, - std::uint32_t maxSeq, - Application& app) -{ - std::map ret; - auto infos = loadLedgerInfos(pgPool, std::make_pair(minSeq, maxSeq), app); - for (auto& info : infos) - { - ret[info.seq] = {info.hash, info.parentHash}; - } - return ret; -} - -std::vector -getTxHashes( - std::shared_ptr const& pgPool, - LedgerIndex seq, - Application& app) -{ - std::vector nodestoreHashes; - #ifdef RIPPLED_REPORTING - auto log = app.journal("Ledger"); - - std::string query = - "SELECT nodestore_hash" - " FROM transactions " - " WHERE ledger_seq = " + - std::to_string(seq); - auto res = PgQuery(pgPool)(query.c_str()); - - if (!res) - { - JLOG(log.error()) << __func__ - << " : Postgres response is null - query = " << query; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(log.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - query = " << query; - assert(false); - return {}; - } - - JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(log.debug()) << __func__ - << " : Ledger not found. query = " << query; - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 1) - { - JLOG(log.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . 
query = " << query; - assert(false); - return {}; - } - } +static bool +writeToLedgersDB(LedgerInfo const& info, PgQuery& pgQuery, beast::Journal& j) +{ + JLOG(j.debug()) << __func__; + auto cmd = boost::format( + R"(INSERT INTO ledgers + VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))"); - JLOG(log.trace()) << __func__ << " : result = " << res.c_str() - << " : query = " << query; - for (size_t i = 0; i < res.ntuples(); ++i) - { - char const* nodestoreHash = res.c_str(i, 0); - uint256 hash; - if (!hash.parseHex(nodestoreHash + 2)) - assert(false); + auto ledgerInsert = boost::str( + cmd % info.seq % strHex(info.hash) % strHex(info.parentHash) % + info.drops.drops() % info.closeTime.time_since_epoch().count() % + info.parentCloseTime.time_since_epoch().count() % + info.closeTimeResolution.count() % info.closeFlags % + strHex(info.accountHash) % strHex(info.txHash)); + JLOG(j.trace()) << __func__ << " : " + << " : " + << "query string = " << ledgerInsert; - nodestoreHashes.push_back(hash); - } -#endif + auto res = pgQuery(ledgerInsert.data()); - return nodestoreHashes; + return res; } -#ifdef RIPPLED_REPORTING enum class DataFormat { binary, expanded }; static std::variant flatFetchTransactions( @@ -448,7 +390,7 @@ flatFetchTransactions( static std::pair processAccountTxStoredProcedureResult( - AccountTxArgs const& args, + RelationalDatabase::AccountTxArgs const& args, Json::Value& result, Application& app, beast::Journal j) @@ -538,36 +480,392 @@ processAccountTxStoredProcedureResult( } #endif -std::pair -getAccountTx( - std::shared_ptr const& pgPool, - AccountTxArgs const& args, - Application& app, - beast::Journal j) +void +PostgresDatabaseImp::sweep() { #ifdef RIPPLED_REPORTING - pg_params dbParams; + pgPool_->idleSweeper(); +#endif +} - char const*& command = dbParams.first; - std::vector>& values = dbParams.second; - command = - "SELECT account_tx($1::bytea, $2::bool, " - "$3::bigint, $4::bigint, $5::bigint, $6::bytea, " - "$7::bigint, $8::bool, $9::bigint, $10::bigint)"; - values.resize(10); - values[0] = "\\x" + strHex(args.account); - values[1] = args.forward ? 
"true" : "false"; +std::optional +PostgresDatabaseImp::getMinLedgerSeq() +{ +#ifdef RIPPLED_REPORTING + auto seq = PgQuery(pgPool_)("SELECT min_ledger()"); + if (!seq) + { + JLOG(j_.error()) << "Error querying minimum ledger sequence."; + } + else if (!seq.isNull()) + return seq.asInt(); +#endif + return {}; +} - static std::uint32_t const page_length(200); - if (args.limit == 0 || args.limit > page_length) - values[2] = std::to_string(page_length); +std::optional +PostgresDatabaseImp::getMaxLedgerSeq() +{ +#ifdef RIPPLED_REPORTING + auto seq = PgQuery(pgPool_)("SELECT max_ledger()"); + if (seq && !seq.isNull()) + return seq.asBigInt(); +#endif + return {}; +} + +std::string +PostgresDatabaseImp::getCompleteLedgers() +{ +#ifdef RIPPLED_REPORTING + auto range = PgQuery(pgPool_)("SELECT complete_ledgers()"); + if (range) + return range.c_str(); +#endif + return "error"; +} + +std::chrono::seconds +PostgresDatabaseImp::getValidatedLedgerAge() +{ + using namespace std::chrono_literals; +#ifdef RIPPLED_REPORTING + auto age = PgQuery(pgPool_)("SELECT age()"); + if (!age || age.isNull()) + JLOG(j_.debug()) << "No ledgers in database"; else - values[2] = std::to_string(args.limit); + return std::chrono::seconds{age.asInt()}; +#endif + return weeks{2}; +} - if (args.ledger) +bool +PostgresDatabaseImp::writeLedgerAndTransactions( + LedgerInfo const& info, + std::vector const& accountTxData) +{ +#ifdef RIPPLED_REPORTING + JLOG(j_.debug()) << __func__ << " : " + << "Beginning write to Postgres"; + + try { - if (auto range = std::get_if(&args.ledger.value())) - { + // Create a PgQuery object to run multiple commands over the same + // connection in a single transaction block. + PgQuery pg(pgPool_); + auto res = pg("BEGIN"); + if (!res || res.status() != PGRES_COMMAND_OK) + { + std::stringstream msg; + msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); + Throw(msg.str()); + } + + // Writing to the ledgers db fails if the ledger already exists in the + // db. 
In this situation, the ETL process has detected there is another + // writer, and falls back to only publishing + if (!writeToLedgersDB(info, pg, j_)) + { + JLOG(j_.warn()) << __func__ << " : " + << "Failed to write to ledgers database."; + return false; + } + + std::stringstream transactionsCopyBuffer; + std::stringstream accountTransactionsCopyBuffer; + for (auto const& data : accountTxData) + { + std::string txHash = strHex(data.txHash); + std::string nodestoreHash = strHex(data.nodestoreHash); + auto idx = data.transactionIndex; + auto ledgerSeq = data.ledgerSequence; + + transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t' + << std::to_string(idx) << '\t' << "\\\\x" + << txHash << '\t' << "\\\\x" << nodestoreHash + << '\n'; + + for (auto const& a : data.accounts) + { + std::string acct = strHex(a); + accountTransactionsCopyBuffer + << "\\\\x" << acct << '\t' << std::to_string(ledgerSeq) + << '\t' << std::to_string(idx) << '\n'; + } + } + + pg.bulkInsert("transactions", transactionsCopyBuffer.str()); + pg.bulkInsert( + "account_transactions", accountTransactionsCopyBuffer.str()); + + res = pg("COMMIT"); + if (!res || res.status() != PGRES_COMMAND_OK) + { + std::stringstream msg; + msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); + assert(false); + Throw(msg.str()); + } + + JLOG(j_.info()) << __func__ << " : " + << "Successfully wrote to Postgres"; + return true; + } + catch (std::exception& e) + { + JLOG(j_.error()) << __func__ + << "Caught exception writing to Postgres : " + << e.what(); + assert(false); + return false; + } +#else + return false; +#endif +} + +std::optional +PostgresDatabaseImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) +{ + return loadLedgerHelper(pgPool_, ledgerSeq, app_); +} + +std::optional +PostgresDatabaseImp::getNewestLedgerInfo() +{ + return loadLedgerHelper(pgPool_, {}, app_); +} + +std::optional +PostgresDatabaseImp::getLedgerInfoByHash(uint256 const& ledgerHash) +{ + return loadLedgerHelper(pgPool_, ledgerHash, app_); +} + +uint256 +PostgresDatabaseImp::getHashByIndex(LedgerIndex ledgerIndex) +{ + auto infos = loadLedgerInfos(pgPool_, ledgerIndex, app_); + assert(infos.size() <= 1); + if (infos.size()) + return infos[0].hash; + return {}; +} + +std::optional +PostgresDatabaseImp::getHashesByIndex(LedgerIndex ledgerIndex) +{ + LedgerHashPair p; + auto infos = loadLedgerInfos(pgPool_, ledgerIndex, app_); + assert(infos.size() <= 1); + if (infos.size()) + { + p.ledgerHash = infos[0].hash; + p.parentHash = infos[0].parentHash; + return p; + } + return {}; +} + +std::map +PostgresDatabaseImp::getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) +{ + std::map ret; + auto infos = loadLedgerInfos(pgPool_, std::make_pair(minSeq, maxSeq), app_); + for (auto& info : infos) + { + ret[info.seq] = {info.hash, info.parentHash}; + } + return ret; +} + +std::vector +PostgresDatabaseImp::getTxHashes(LedgerIndex seq) +{ + std::vector nodestoreHashes; + +#ifdef RIPPLED_REPORTING + auto log = app_.journal("Ledger"); + + std::string query = + "SELECT nodestore_hash" + " FROM transactions " + " WHERE ledger_seq = " + + std::to_string(seq); + auto res = PgQuery(pgPool_)(query.c_str()); + + if (!res) + { + JLOG(log.error()) << __func__ + << " : Postgres response is null - query = " << query; + assert(false); + return {}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(log.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - query = " << 
query; + assert(false); + return {}; + } + + JLOG(log.trace()) << __func__ << " Postgres result msg : " << res.msg(); + + if (res.isNull() || res.ntuples() == 0) + { + JLOG(log.debug()) << __func__ + << " : Ledger not found. query = " << query; + return {}; + } + else if (res.ntuples() > 0) + { + if (res.nfields() != 1) + { + JLOG(log.error()) << __func__ + << " : Wrong number of fields in Postgres " + "response. Expected 1, but got " + << res.nfields() << " . query = " << query; + assert(false); + return {}; + } + } + + JLOG(log.trace()) << __func__ << " : result = " << res.c_str() + << " : query = " << query; + for (size_t i = 0; i < res.ntuples(); ++i) + { + char const* nodestoreHash = res.c_str(i, 0); + uint256 hash; + if (!hash.parseHex(nodestoreHash + 2)) + assert(false); + + nodestoreHashes.push_back(hash); + } +#endif + + return nodestoreHashes; +} + +std::vector> +PostgresDatabaseImp::getTxHistory(LedgerIndex startIndex) +{ + std::vector> ret; + +#ifdef RIPPLED_REPORTING + if (!app_.config().reporting()) + { + assert(false); + Throw( + "called getTxHistory but not in reporting mode"); + } + + std::string sql = boost::str( + boost::format("SELECT nodestore_hash, ledger_seq " + " FROM transactions" + " ORDER BY ledger_seq DESC LIMIT 20 " + "OFFSET %u;") % + startIndex); + + auto res = PgQuery(pgPool_)(sql.data()); + + if (!res) + { + JLOG(j_.error()) << __func__ + << " : Postgres response is null - sql = " << sql; + assert(false); + return {}; + } + else if (res.status() != PGRES_TUPLES_OK) + { + JLOG(j_.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - sql = " << sql; + assert(false); + return {}; + } + + JLOG(j_.trace()) << __func__ << " Postgres result msg : " << res.msg(); + + if (res.isNull() || res.ntuples() == 0) + { + JLOG(j_.debug()) << __func__ << " : Empty postgres response"; + assert(false); + return {}; + } + else if (res.ntuples() > 0) + { + if (res.nfields() != 2) + { + JLOG(j_.error()) << __func__ + << " : Wrong number of fields in Postgres " + "response. Expected 1, but got " + << res.nfields() << " . sql = " << sql; + assert(false); + return {}; + } + } + + JLOG(j_.trace()) << __func__ << " : Postgres result = " << res.c_str(); + + std::vector nodestoreHashes; + std::vector ledgerSequences; + for (size_t i = 0; i < res.ntuples(); ++i) + { + uint256 hash; + if (!hash.parseHex(res.c_str(i, 0) + 2)) + assert(false); + nodestoreHashes.push_back(hash); + ledgerSequences.push_back(res.asBigInt(i, 1)); + } + + auto txns = flatFetchTransactions(app_, nodestoreHashes); + for (size_t i = 0; i < txns.size(); ++i) + { + auto const& [sttx, meta] = txns[i]; + assert(sttx); + + std::string reason; + auto txn = std::make_shared(sttx, reason, app_); + txn->setLedger(ledgerSequences[i]); + txn->setStatus(COMMITTED); + ret.push_back(txn); + } + +#endif + return ret; +} + +std::pair +PostgresDatabaseImp::getAccountTx(AccountTxArgs const& args) +{ +#ifdef RIPPLED_REPORTING + pg_params dbParams; + + char const*& command = dbParams.first; + std::vector>& values = dbParams.second; + command = + "SELECT account_tx($1::bytea, $2::bool, " + "$3::bigint, $4::bigint, $5::bigint, $6::bytea, " + "$7::bigint, $8::bool, $9::bigint, $10::bigint)"; + values.resize(10); + values[0] = "\\x" + strHex(args.account); + values[1] = args.forward ? 
"true" : "false"; + + static std::uint32_t const page_length(200); + if (args.limit == 0 || args.limit > page_length) + values[2] = std::to_string(page_length); + else + values[2] = std::to_string(args.limit); + + if (args.ledger) + { + if (auto range = std::get_if(&args.ledger.value())) + { values[3] = std::to_string(range->min); values[4] = std::to_string(range->max); } @@ -587,8 +885,8 @@ getAccountTx( } else { - JLOG(j.error()) << "doAccountTxStoredProcedure - " - << "Error parsing ledger args"; + JLOG(j_.error()) << "doAccountTxStoredProcedure - " + << "Error parsing ledger args"; return {}; } } @@ -600,52 +898,52 @@ getAccountTx( } for (size_t i = 0; i < values.size(); ++i) { - JLOG(j.trace()) << "value " << std::to_string(i) << " = " - << (values[i] ? values[i].value() : "null"); + JLOG(j_.trace()) << "value " << std::to_string(i) << " = " + << (values[i] ? values[i].value() : "null"); } - auto res = PgQuery(pgPool)(dbParams); + auto res = PgQuery(pgPool_)(dbParams); if (!res) { - JLOG(j.error()) << __func__ - << " : Postgres response is null - account = " - << strHex(args.account); + JLOG(j_.error()) << __func__ + << " : Postgres response is null - account = " + << strHex(args.account); assert(false); return {{}, {rpcINTERNAL, "Postgres error"}}; } else if (res.status() != PGRES_TUPLES_OK) { - JLOG(j.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - account = " << strHex(args.account); + JLOG(j_.error()) << __func__ + << " : Postgres response should have been " + "PGRES_TUPLES_OK but instead was " + << res.status() << " - msg = " << res.msg() + << " - account = " << strHex(args.account); assert(false); return {{}, {rpcINTERNAL, "Postgres error"}}; } - JLOG(j.trace()) << __func__ << " Postgres result msg : " << res.msg(); + JLOG(j_.trace()) << __func__ << " Postgres result msg : " << res.msg(); if (res.isNull() || res.ntuples() == 0) { - JLOG(j.debug()) << __func__ - << " : No data returned from Postgres : account = " - << strHex(args.account); + JLOG(j_.debug()) << __func__ + << " : No data returned from Postgres : account = " + << strHex(args.account); assert(false); return {{}, {rpcINTERNAL, "Postgres error"}}; } char const* resultStr = res.c_str(); - JLOG(j.trace()) << __func__ << " : " - << "postgres result = " << resultStr - << " : account = " << strHex(args.account); + JLOG(j_.trace()) << __func__ << " : " + << "postgres result = " << resultStr + << " : account = " << strHex(args.account); Json::Value v; Json::Reader reader; bool success = reader.parse(resultStr, resultStr + strlen(resultStr), v); if (success) { - return processAccountTxStoredProcedureResult(args, v, app, j); + return processAccountTxStoredProcedureResult(args, v, app_, j_); } #endif // This shouldn't happen. 
Postgres should return a parseable error @@ -654,10 +952,7 @@ getAccountTx( } Transaction::Locator -locateTransaction( - std::shared_ptr const& pgPool, - uint256 const& id, - Application& app) +PostgresDatabaseImp::locateTransaction(uint256 const& id) { #ifdef RIPPLED_REPORTING auto baseCmd = boost::format(R"(SELECT tx('%s');)"); @@ -665,11 +960,11 @@ locateTransaction( std::string txHash = "\\x" + strHex(id); std::string sql = boost::str(baseCmd % txHash); - auto res = PgQuery(pgPool)(sql.data()); + auto res = PgQuery(pgPool_)(sql.data()); if (!res) { - JLOG(app.journal("Transaction").error()) + JLOG(app_.journal("Transaction").error()) << __func__ << " : Postgres response is null - tx ID = " << strHex(id); assert(false); @@ -677,7 +972,7 @@ locateTransaction( } else if (res.status() != PGRES_TUPLES_OK) { - JLOG(app.journal("Transaction").error()) + JLOG(app_.journal("Transaction").error()) << __func__ << " : Postgres response should have been " "PGRES_TUPLES_OK but instead was " @@ -687,11 +982,11 @@ locateTransaction( return {}; } - JLOG(app.journal("Transaction").trace()) + JLOG(app_.journal("Transaction").trace()) << __func__ << " Postgres result msg : " << res.msg(); if (res.isNull() || res.ntuples() == 0) { - JLOG(app.journal("Transaction").debug()) + JLOG(app_.journal("Transaction").debug()) << __func__ << " : No data returned from Postgres : tx ID = " << strHex(id); // This shouldn't happen @@ -700,7 +995,7 @@ locateTransaction( } char const* resultStr = res.c_str(); - JLOG(app.journal("Transaction").debug()) + JLOG(app_.journal("Transaction").debug()) << "postgres result = " << resultStr; Json::Value v; @@ -733,210 +1028,50 @@ locateTransaction( return {}; } -#ifdef RIPPLED_REPORTING -static bool -writeToLedgersDB(LedgerInfo const& info, PgQuery& pgQuery, beast::Journal& j) +bool +PostgresDatabaseImp::dbHasSpace(Config const& config) { - JLOG(j.debug()) << __func__; - auto cmd = boost::format( - R"(INSERT INTO ledgers - VALUES (%u,'\x%s', '\x%s',%u,%u,%u,%u,%u,'\x%s','\x%s'))"); + /* Postgres server could be running on a different machine. */ - auto ledgerInsert = boost::str( - cmd % info.seq % strHex(info.hash) % strHex(info.parentHash) % - info.drops.drops() % info.closeTime.time_since_epoch().count() % - info.parentCloseTime.time_since_epoch().count() % - info.closeTimeResolution.count() % info.closeFlags % - strHex(info.accountHash) % strHex(info.txHash)); - JLOG(j.trace()) << __func__ << " : " - << " : " - << "query string = " << ledgerInsert; - - auto res = pgQuery(ledgerInsert.data()); - - return res; + return true; } -#endif bool -writeLedgerAndTransactions( - std::shared_ptr const& pgPool, - LedgerInfo const& info, - std::vector const& accountTxData, - beast::Journal& j) +PostgresDatabaseImp::ledgerDbHasSpace(Config const& config) { -#ifdef RIPPLED_REPORTING - JLOG(j.debug()) << __func__ << " : " - << "Beginning write to Postgres"; - - try - { - // Create a PgQuery object to run multiple commands over the same - // connection in a single transaction block. - PgQuery pg(pgPool); - auto res = pg("BEGIN"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - Throw(msg.str()); - } - - // Writing to the ledgers db fails if the ledger already exists in the - // db. 
In this situation, the ETL process has detected there is another - // writer, and falls back to only publishing - if (!writeToLedgersDB(info, pg, j)) - { - JLOG(j.warn()) << __func__ << " : " - << "Failed to write to ledgers database."; - return false; - } - - std::stringstream transactionsCopyBuffer; - std::stringstream accountTransactionsCopyBuffer; - for (auto const& data : accountTxData) - { - std::string txHash = strHex(data.txHash); - std::string nodestoreHash = strHex(data.nodestoreHash); - auto idx = data.transactionIndex; - auto ledgerSeq = data.ledgerSequence; - - transactionsCopyBuffer << std::to_string(ledgerSeq) << '\t' - << std::to_string(idx) << '\t' << "\\\\x" - << txHash << '\t' << "\\\\x" << nodestoreHash - << '\n'; - - for (auto const& a : data.accounts) - { - std::string acct = strHex(a); - accountTransactionsCopyBuffer - << "\\\\x" << acct << '\t' << std::to_string(ledgerSeq) - << '\t' << std::to_string(idx) << '\n'; - } - } - - pg.bulkInsert("transactions", transactionsCopyBuffer.str()); - pg.bulkInsert( - "account_transactions", accountTransactionsCopyBuffer.str()); - - res = pg("COMMIT"); - if (!res || res.status() != PGRES_COMMAND_OK) - { - std::stringstream msg; - msg << "bulkWriteToTable : Postgres insert error: " << res.msg(); - assert(false); - Throw(msg.str()); - } + return dbHasSpace(config); +} - JLOG(j.info()) << __func__ << " : " - << "Successfully wrote to Postgres"; - return true; - } - catch (std::exception& e) - { - JLOG(j.error()) << __func__ << "Caught exception writing to Postgres : " - << e.what(); - assert(false); - return false; - } -#else - return false; -#endif +bool +PostgresDatabaseImp::transactionDbHasSpace(Config const& config) +{ + return dbHasSpace(config); } -std::vector> -getTxHistory( - std::shared_ptr const& pgPool, - LedgerIndex startIndex, - Application& app, - beast::Journal j) +std::unique_ptr +getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue) { - std::vector> ret; + return std::make_unique(app, config, jobQueue); +} +bool +PostgresDatabaseImp::isCaughtUp(std::string& reason) +{ #ifdef RIPPLED_REPORTING - if (!app.config().reporting()) - { - assert(false); - Throw( - "called getTxHistory but not in reporting mode"); - } - - std::string sql = boost::str( - boost::format("SELECT nodestore_hash, ledger_seq " - " FROM transactions" - " ORDER BY ledger_seq DESC LIMIT 20 " - "OFFSET %u;") % - startIndex); - - auto res = PgQuery(pgPool)(sql.data()); - - if (!res) - { - JLOG(j.error()) << __func__ - << " : Postgres response is null - sql = " << sql; - assert(false); - return {}; - } - else if (res.status() != PGRES_TUPLES_OK) - { - JLOG(j.error()) << __func__ - << " : Postgres response should have been " - "PGRES_TUPLES_OK but instead was " - << res.status() << " - msg = " << res.msg() - << " - sql = " << sql; - assert(false); - return {}; - } - - JLOG(j.trace()) << __func__ << " Postgres result msg : " << res.msg(); - - if (res.isNull() || res.ntuples() == 0) - { - JLOG(j.debug()) << __func__ << " : Empty postgres response"; - assert(false); - return {}; - } - else if (res.ntuples() > 0) - { - if (res.nfields() != 2) - { - JLOG(j.error()) << __func__ - << " : Wrong number of fields in Postgres " - "response. Expected 1, but got " - << res.nfields() << " . 
sql = " << sql; - assert(false); - return {}; - } - } - - JLOG(j.trace()) << __func__ << " : Postgres result = " << res.c_str(); - - std::vector nodestoreHashes; - std::vector ledgerSequences; - for (size_t i = 0; i < res.ntuples(); ++i) + using namespace std::chrono_literals; + auto age = PgQuery(pgPool_)("SELECT age()"); + if (!age || age.isNull()) { - uint256 hash; - if (!hash.parseHex(res.c_str(i, 0) + 2)) - assert(false); - nodestoreHashes.push_back(hash); - ledgerSequences.push_back(res.asBigInt(i, 1)); + reason = "No ledgers in database"; + return false; } - - auto txns = flatFetchTransactions(app, nodestoreHashes); - for (size_t i = 0; i < txns.size(); ++i) + if (std::chrono::seconds{age.asInt()} > 3min) { - auto const& [sttx, meta] = txns[i]; - assert(sttx); - - std::string reason; - auto txn = std::make_shared(sttx, reason, app); - txn->setLedger(ledgerSequences[i]); - txn->setStatus(COMMITTED); - ret.push_back(txn); + reason = "No recently-published ledger"; + return false; } - #endif - return ret; + return true; } } // namespace ripple diff --git a/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp similarity index 75% rename from src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp rename to src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp index cd5277fb9a9..e6ec44399a0 100644 --- a/src/ripple/app/rdb/backend/RelationalDBInterfaceSqlite.cpp +++ b/src/ripple/app/rdb/backend/impl/SQLiteDatabase.cpp @@ -23,43 +23,41 @@ #include #include #include -#include -#include -#include -#include +#include +#include +#include #include #include #include #include #include #include -#include -#include #include namespace ripple { -class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite +class SQLiteDatabaseImp final : public SQLiteDatabase { public: - RelationalDBInterfaceSqliteImp( + SQLiteDatabaseImp( Application& app, Config const& config, JobQueue& jobQueue) : app_(app) , useTxTables_(config.useTxTables()) - , j_(app_.journal("Ledger")) + , j_(app_.journal("SQLiteDatabaseImp")) { - DatabaseCon::Setup setup = setup_DatabaseCon(config, j_); + DatabaseCon::Setup const setup = setup_DatabaseCon(config, j_); if (!makeLedgerDBs( config, setup, DatabaseCon::CheckpointerSetup{&jobQueue, &app_.logs()})) { - JLOG(app_.journal("RelationalDBInterfaceSqlite").fatal()) - << "AccountTransactions database should not have a primary key"; - Throw( - "AccountTransactions database initialization failed."); + std::string_view constexpr error = + "Failed to create ledger databases"; + + JLOG(j_.fatal()) << error; + Throw(error.data()); } if (app.getShardStore() && @@ -68,10 +66,11 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite setup, DatabaseCon::CheckpointerSetup{&jobQueue, &app_.logs()})) { - JLOG(app_.journal("RelationalDBInterfaceSqlite").fatal()) - << "Error during meta DB init"; - Throw( - "Shard meta database initialization failed."); + std::string_view constexpr error = + "Failed to create metadata databases"; + + JLOG(j_.fatal()) << error; + Throw(error.data()); } } @@ -105,7 +104,7 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite std::size_t getAccountTransactionCount() override; - RelationalDBInterface::CountMinMax + RelationalDatabase::CountMinMax getLedgerCountMinMax() override; bool @@ -199,12 +198,12 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite std::unique_ptr lgrMetaDB_, txMetaDB_; /** - * @brief makeLedgerDBs Opens node 
ledger and transaction databases, - * and saves its descriptors into internal variables. + * @brief makeLedgerDBs Opens ledger and transaction databases for the node + * store, and stores their descriptors in private member variables. * @param config Config object. - * @param setup Path to database and other opening parameters. + * @param setup Path to the databases and other opening parameters. * @param checkpointerSetup Checkpointer parameters. - * @return True if node databases opened succsessfully. + * @return True if node databases opened successfully. */ bool makeLedgerDBs( @@ -213,10 +212,10 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite DatabaseCon::CheckpointerSetup const& checkpointerSetup); /** - * @brief makeMetaDBs Opens shard index lookup databases, and saves - * their descriptors into internal variables. + * @brief makeMetaDBs Opens shard index lookup databases, and stores + * their descriptors in private member variables. * @param config Config object. - * @param setup Path to database and other opening parameters. + * @param setup Path to the databases and other opening parameters. * @param checkpointerSetup Checkpointer parameters. * @return True if node databases opened successfully. */ bool @@ -227,7 +226,8 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite DatabaseCon::CheckpointerSetup const& checkpointerSetup); /** - * @brief seqToShardIndex Converts ledgers sequence to shard index. + * @brief seqToShardIndex Provides the index of the shard that stores the + * ledger with the given sequence. * @param ledgerSeq Ledger sequence. * @return Shard index. */ @@ -238,7 +238,8 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief firstLedgerSeq Returns first ledger sequence for given shard. + * @brief firstLedgerSeq Returns the sequence of the first ledger stored in + * the shard specified by the shard index parameter. * @param shardIndex Shard Index. * @return First ledger sequence. */ @@ -249,7 +250,8 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief lastLedgerSeq Returns last ledger sequence for given shard. + * @brief lastLedgerSeq Returns the sequence of the last ledger stored in + * the shard specified by the shard index parameter. * @param shardIndex Shard Index. * @return Last ledger sequence. */ @@ -260,8 +262,8 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief existsLedger Checks if node ledger DB exists. - * @return True if node ledger DB exists. + * @brief existsLedger Checks if the node store ledger database exists. + * @return True if the node store ledger database exists. */ bool existsLedger() @@ -270,8 +272,9 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief existsTransaction Checks if node transaction DB exists. - * @return True if node transaction DB exists. + * @brief existsTransaction Checks if the node store transaction database + * exists. + * @return True if the node store transaction database exists. */ bool existsTransaction() @@ -290,8 +293,9 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief checkoutTransaction Checks out and returns node ledger DB. - * @return Session to node ledger DB. + * @brief checkoutLedger Checks out and returns the node store ledger + * database. + * @return Session to the node store ledger database.
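 *
 * Typical use dereferences the checked-out handle to obtain the underlying
 * soci::session, as the accessors below do (a sketch mirroring
 * getMinLedgerSeq()):
 * @code
 *   auto db = checkoutLedger();
 *   return detail::getMinLedgerSeq(*db, detail::TableType::Ledgers);
 * @endcode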
*/ auto checkoutLedger() @@ -300,8 +304,9 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief checkoutTransaction Checks out and returns node transaction DB. - * @return Session to node transaction DB. + * @brief checkoutTransaction Checks out and returns the node store + * transaction database. + * @return Session to the node store transaction database. */ auto checkoutTransaction() @@ -310,9 +315,9 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief doLedger Checks out ledger database for shard - * containing given ledger and calls given callback function passing - * shard index and session with the database to it. + * @brief doLedger Checks out the ledger database owned by the shard + * containing the given ledger, and invokes the provided callback + * with a session to that database. * @param ledgerSeq Ledger sequence. * @param callback Callback function to call. * @return Value returned by callback function. @@ -327,9 +332,9 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief doTransaction Checks out transaction database for shard - * containing given ledger and calls given callback function passing - * shard index and session with the database to it. + * @brief doTransaction Checks out the transaction database owned by the + * shard containing the given ledger, and invokes the provided + * callback with a session to that database. * @param ledgerSeq Ledger sequence. * @param callback Callback function to call. * @return Value returned by callback function. @@ -344,12 +349,12 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief iterateLedgerForward Checks out ledger databases for - * all shards in ascending order starting from given shard index - * until shard with the largest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param firstIndex Start shard index to visit or none if all shards + * @brief iterateLedgerForward Checks out ledger databases for all shards in + * ascending order starting from the given shard index, until all + * shards in range have been visited or the callback returns false. + * For each visited shard, we invoke the provided callback with a + * session to the database and the current shard index. + * @param firstIndex First shard index to visit or no value if all shards * should be visited. * @param callback Callback function to call. * @return True if each callback function returned true, false otherwise. @@ -366,12 +371,13 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief iterateTransactionForward Checks out transaction databases for - * all shards in ascending order starting from given shard index - * until shard with the largest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param firstIndex Start shard index to visit or none if all shards + * @brief iterateTransactionForward Checks out transaction databases for all + * shards in ascending order starting from the given shard index, + * until all shards in range have been visited or the callback + * returns false. For each visited shard, we invoke the provided + * callback with a session to the database and the current shard + * index. 
+ * @param firstIndex First shard index to visit or no value if all shards * should be visited. * @param callback Callback function to call. * @return True if each callback function returned true, false otherwise. @@ -388,12 +394,13 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief iterateLedgerBack Checks out ledger databases for - * all shards in descending order starting from given shard index - * until shard with the smallest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param firstIndex Start shard index to visit or none if all shards + * @brief iterateLedgerBack Checks out ledger databases for all + * shards in descending order starting from the given shard index, + * until all shards in range have been visited or the callback + * returns false. For each visited shard, we invoke the provided + * callback with a session to the database and the current shard + * index. + * @param firstIndex First shard index to visit or no value if all shards * should be visited. * @param callback Callback function to call. * @return True if each callback function returned true, false otherwise. @@ -410,12 +417,13 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite } /** - * @brief iterateTransactionForward Checks out transaction databases for - * all shards in descending order starting from given shard index - * until shard with the smallest index visited or callback returned - * false. For each visited shard calls given callback function - * passing shard index and session with the database to it. - * @param firstIndex Start shard index to visit or none if all shards + * @brief iterateTransactionBack Checks out transaction databases for all + * shards in descending order starting from the given shard index, + * until all shards in range have been visited or the callback + * returns false. For each visited shard, we invoke the provided + * callback with a session to the database and the current shard + * index. + * @param firstIndex First shard index to visit or no value if all shards * should be visited. * @param callback Callback function to call. * @return True if each callback function returned true, false otherwise. 
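// The four iterate*{Forward,Back} helpers documented above share one callback
// contract: the callback receives a soci::session for a single shard's
// database together with that shard's index, and returns false to stop the
// walk early.  A minimal sketch of the pattern, modelled on getMaxLedgerSeq()
// below (the element type of the std::optional is assumed to be LedgerIndex);
// it would live inside SQLiteDatabaseImp:

    std::optional<LedgerIndex> res;
    iterateLedgerBack(
        {},  // no starting shard index: visit every shard, highest index first
        [&](soci::session& session, std::uint32_t shardIndex) {
            res = detail::getMaxLedgerSeq(session, detail::TableType::Ledgers);
            return !res;  // stop as soon as one shard yields a sequence
        });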
@@ -433,26 +441,26 @@ class RelationalDBInterfaceSqliteImp : public RelationalDBInterfaceSqlite }; bool -RelationalDBInterfaceSqliteImp::makeLedgerDBs( +SQLiteDatabaseImp::makeLedgerDBs( Config const& config, DatabaseCon::Setup const& setup, DatabaseCon::CheckpointerSetup const& checkpointerSetup) { auto [lgr, tx, res] = - ripple::makeLedgerDBs(config, setup, checkpointerSetup); + detail::makeLedgerDBs(config, setup, checkpointerSetup); txdb_ = std::move(tx); lgrdb_ = std::move(lgr); return res; } bool -RelationalDBInterfaceSqliteImp::makeMetaDBs( +SQLiteDatabaseImp::makeMetaDBs( Config const& config, DatabaseCon::Setup const& setup, DatabaseCon::CheckpointerSetup const& checkpointerSetup) { auto [lgrMetaDB, txMetaDB] = - ripple::makeMetaDBs(config, setup, checkpointerSetup); + detail::makeMetaDBs(config, setup, checkpointerSetup); txMetaDB_ = std::move(txMetaDB); lgrMetaDB_ = std::move(lgrMetaDB); @@ -461,13 +469,13 @@ RelationalDBInterfaceSqliteImp::makeMetaDBs( } std::optional -RelationalDBInterfaceSqliteImp::getMinLedgerSeq() +SQLiteDatabaseImp::getMinLedgerSeq() { /* if databases exists, use it */ if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getMinLedgerSeq(*db, TableType::Ledgers); + return detail::getMinLedgerSeq(*db, detail::TableType::Ledgers); } /* else use shard databases, if available */ @@ -476,7 +484,8 @@ RelationalDBInterfaceSqliteImp::getMinLedgerSeq() std::optional res; iterateLedgerForward( {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = ripple::getMinLedgerSeq(session, TableType::Ledgers); + res = detail::getMinLedgerSeq( + session, detail::TableType::Ledgers); return !res; }); return res; @@ -487,7 +496,7 @@ RelationalDBInterfaceSqliteImp::getMinLedgerSeq() } std::optional -RelationalDBInterfaceSqliteImp::getTransactionsMinLedgerSeq() +SQLiteDatabaseImp::getTransactionsMinLedgerSeq() { if (!useTxTables_) return {}; @@ -495,7 +504,7 @@ RelationalDBInterfaceSqliteImp::getTransactionsMinLedgerSeq() if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getMinLedgerSeq(*db, TableType::Transactions); + return detail::getMinLedgerSeq(*db, detail::TableType::Transactions); } if (shardStoreExists()) @@ -503,7 +512,8 @@ RelationalDBInterfaceSqliteImp::getTransactionsMinLedgerSeq() std::optional res; iterateTransactionForward( {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = ripple::getMinLedgerSeq(session, TableType::Transactions); + res = detail::getMinLedgerSeq( + session, detail::TableType::Transactions); return !res; }); return res; @@ -513,7 +523,7 @@ RelationalDBInterfaceSqliteImp::getTransactionsMinLedgerSeq() } std::optional -RelationalDBInterfaceSqliteImp::getAccountTransactionsMinLedgerSeq() +SQLiteDatabaseImp::getAccountTransactionsMinLedgerSeq() { if (!useTxTables_) return {}; @@ -521,7 +531,8 @@ RelationalDBInterfaceSqliteImp::getAccountTransactionsMinLedgerSeq() if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getMinLedgerSeq(*db, TableType::AccountTransactions); + return detail::getMinLedgerSeq( + *db, detail::TableType::AccountTransactions); } if (shardStoreExists()) @@ -529,8 +540,8 @@ RelationalDBInterfaceSqliteImp::getAccountTransactionsMinLedgerSeq() std::optional res; iterateTransactionForward( {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = ripple::getMinLedgerSeq( - session, TableType::AccountTransactions); + res = detail::getMinLedgerSeq( + session, detail::TableType::AccountTransactions); return !res; }); return res; @@ 
-540,12 +551,12 @@ RelationalDBInterfaceSqliteImp::getAccountTransactionsMinLedgerSeq() } std::optional -RelationalDBInterfaceSqliteImp::getMaxLedgerSeq() +SQLiteDatabaseImp::getMaxLedgerSeq() { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getMaxLedgerSeq(*db, TableType::Ledgers); + return detail::getMaxLedgerSeq(*db, detail::TableType::Ledgers); } if (shardStoreExists()) @@ -553,7 +564,8 @@ RelationalDBInterfaceSqliteImp::getMaxLedgerSeq() std::optional res; iterateLedgerBack( {}, [&](soci::session& session, std::uint32_t shardIndex) { - res = ripple::getMaxLedgerSeq(session, TableType::Ledgers); + res = detail::getMaxLedgerSeq( + session, detail::TableType::Ledgers); return !res; }); return res; @@ -563,8 +575,7 @@ RelationalDBInterfaceSqliteImp::getMaxLedgerSeq() } void -RelationalDBInterfaceSqliteImp::deleteTransactionByLedgerSeq( - LedgerIndex ledgerSeq) +SQLiteDatabaseImp::deleteTransactionByLedgerSeq(LedgerIndex ledgerSeq) { if (!useTxTables_) return; @@ -572,27 +583,29 @@ RelationalDBInterfaceSqliteImp::deleteTransactionByLedgerSeq( if (existsTransaction()) { auto db = checkoutTransaction(); - ripple::deleteByLedgerSeq(*db, TableType::Transactions, ledgerSeq); + detail::deleteByLedgerSeq( + *db, detail::TableType::Transactions, ledgerSeq); return; } if (shardStoreExists()) { doTransaction(ledgerSeq, [&](soci::session& session) { - ripple::deleteByLedgerSeq( - session, TableType::Transactions, ledgerSeq); + detail::deleteByLedgerSeq( + session, detail::TableType::Transactions, ledgerSeq); return true; }); } } void -RelationalDBInterfaceSqliteImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) +SQLiteDatabaseImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) { if (existsLedger()) { auto db = checkoutLedger(); - ripple::deleteBeforeLedgerSeq(*db, TableType::Ledgers, ledgerSeq); + detail::deleteBeforeLedgerSeq( + *db, detail::TableType::Ledgers, ledgerSeq); return; } @@ -601,16 +614,15 @@ RelationalDBInterfaceSqliteImp::deleteBeforeLedgerSeq(LedgerIndex ledgerSeq) iterateLedgerBack( seqToShardIndex(ledgerSeq), [&](soci::session& session, std::uint32_t shardIndex) { - ripple::deleteBeforeLedgerSeq( - session, TableType::Ledgers, ledgerSeq); + detail::deleteBeforeLedgerSeq( + session, detail::TableType::Ledgers, ledgerSeq); return true; }); } } void -RelationalDBInterfaceSqliteImp::deleteTransactionsBeforeLedgerSeq( - LedgerIndex ledgerSeq) +SQLiteDatabaseImp::deleteTransactionsBeforeLedgerSeq(LedgerIndex ledgerSeq) { if (!useTxTables_) return; @@ -618,7 +630,8 @@ RelationalDBInterfaceSqliteImp::deleteTransactionsBeforeLedgerSeq( if (existsTransaction()) { auto db = checkoutTransaction(); - ripple::deleteBeforeLedgerSeq(*db, TableType::Transactions, ledgerSeq); + detail::deleteBeforeLedgerSeq( + *db, detail::TableType::Transactions, ledgerSeq); return; } @@ -627,15 +640,15 @@ RelationalDBInterfaceSqliteImp::deleteTransactionsBeforeLedgerSeq( iterateTransactionBack( seqToShardIndex(ledgerSeq), [&](soci::session& session, std::uint32_t shardIndex) { - ripple::deleteBeforeLedgerSeq( - session, TableType::Transactions, ledgerSeq); + detail::deleteBeforeLedgerSeq( + session, detail::TableType::Transactions, ledgerSeq); return true; }); } } void -RelationalDBInterfaceSqliteImp::deleteAccountTransactionsBeforeLedgerSeq( +SQLiteDatabaseImp::deleteAccountTransactionsBeforeLedgerSeq( LedgerIndex ledgerSeq) { if (!useTxTables_) @@ -644,8 +657,8 @@ RelationalDBInterfaceSqliteImp::deleteAccountTransactionsBeforeLedgerSeq( if (existsTransaction()) { auto db = checkoutTransaction(); 
- ripple::deleteBeforeLedgerSeq( - *db, TableType::AccountTransactions, ledgerSeq); + detail::deleteBeforeLedgerSeq( + *db, detail::TableType::AccountTransactions, ledgerSeq); return; } @@ -654,15 +667,15 @@ RelationalDBInterfaceSqliteImp::deleteAccountTransactionsBeforeLedgerSeq( iterateTransactionBack( seqToShardIndex(ledgerSeq), [&](soci::session& session, std::uint32_t shardIndex) { - ripple::deleteBeforeLedgerSeq( - session, TableType::AccountTransactions, ledgerSeq); + detail::deleteBeforeLedgerSeq( + session, detail::TableType::AccountTransactions, ledgerSeq); return true; }); } } std::size_t -RelationalDBInterfaceSqliteImp::getTransactionCount() +SQLiteDatabaseImp::getTransactionCount() { if (!useTxTables_) return 0; @@ -670,7 +683,7 @@ RelationalDBInterfaceSqliteImp::getTransactionCount() if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getRows(*db, TableType::Transactions); + return detail::getRows(*db, detail::TableType::Transactions); } if (shardStoreExists()) @@ -678,7 +691,8 @@ RelationalDBInterfaceSqliteImp::getTransactionCount() std::size_t rows = 0; iterateTransactionForward( {}, [&](soci::session& session, std::uint32_t shardIndex) { - rows += ripple::getRows(session, TableType::Transactions); + rows += + detail::getRows(session, detail::TableType::Transactions); return true; }); return rows; @@ -688,7 +702,7 @@ RelationalDBInterfaceSqliteImp::getTransactionCount() } std::size_t -RelationalDBInterfaceSqliteImp::getAccountTransactionCount() +SQLiteDatabaseImp::getAccountTransactionCount() { if (!useTxTables_) return 0; @@ -696,7 +710,7 @@ RelationalDBInterfaceSqliteImp::getAccountTransactionCount() if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getRows(*db, TableType::AccountTransactions); + return detail::getRows(*db, detail::TableType::AccountTransactions); } if (shardStoreExists()) @@ -704,8 +718,8 @@ RelationalDBInterfaceSqliteImp::getAccountTransactionCount() std::size_t rows = 0; iterateTransactionForward( {}, [&](soci::session& session, std::uint32_t shardIndex) { - rows += - ripple::getRows(session, TableType::AccountTransactions); + rows += detail::getRows( + session, detail::TableType::AccountTransactions); return true; }); return rows; @@ -714,13 +728,13 @@ RelationalDBInterfaceSqliteImp::getAccountTransactionCount() return 0; } -RelationalDBInterface::CountMinMax -RelationalDBInterfaceSqliteImp::getLedgerCountMinMax() +RelationalDatabase::CountMinMax +SQLiteDatabaseImp::getLedgerCountMinMax() { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getRowsMinMax(*db, TableType::Ledgers); + return detail::getRowsMinMax(*db, detail::TableType::Ledgers); } if (shardStoreExists()) @@ -728,7 +742,8 @@ RelationalDBInterfaceSqliteImp::getLedgerCountMinMax() CountMinMax res{0, 0, 0}; iterateLedgerForward( {}, [&](soci::session& session, std::uint32_t shardIndex) { - auto r = ripple::getRowsMinMax(session, TableType::Ledgers); + auto r = + detail::getRowsMinMax(session, detail::TableType::Ledgers); if (r.numberOfRows) { res.numberOfRows += r.numberOfRows; @@ -745,13 +760,13 @@ RelationalDBInterfaceSqliteImp::getLedgerCountMinMax() } bool -RelationalDBInterfaceSqliteImp::saveValidatedLedger( +SQLiteDatabaseImp::saveValidatedLedger( std::shared_ptr const& ledger, bool current) { if (existsLedger()) { - if (!ripple::saveValidatedLedger( + if (!detail::saveValidatedLedger( *lgrdb_, *txdb_, app_, ledger, current)) return false; } @@ -769,7 +784,7 @@ RelationalDBInterfaceSqliteImp::saveValidatedLedger( auto 
lgrMetaSession = lgrMetaDB_->checkoutDb(); auto txMetaSession = txMetaDB_->checkoutDb(); - return ripple::saveLedgerMeta( + return detail::saveLedgerMeta( ledger, app_, *lgrMetaSession, @@ -781,19 +796,22 @@ RelationalDBInterfaceSqliteImp::saveValidatedLedger( } std::optional -RelationalDBInterfaceSqliteImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) +SQLiteDatabaseImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getLedgerInfoByIndex(*db, ledgerSeq, j_); + auto const res = detail::getLedgerInfoByIndex(*db, ledgerSeq, j_); + + if (res.has_value()) + return res; } if (shardStoreExists()) { std::optional res; doLedger(ledgerSeq, [&](soci::session& session) { - res = ripple::getLedgerInfoByIndex(session, ledgerSeq, j_); + res = detail::getLedgerInfoByIndex(session, ledgerSeq, j_); return true; }); return res; @@ -803,12 +821,15 @@ RelationalDBInterfaceSqliteImp::getLedgerInfoByIndex(LedgerIndex ledgerSeq) } std::optional -RelationalDBInterfaceSqliteImp::getNewestLedgerInfo() +SQLiteDatabaseImp::getNewestLedgerInfo() { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getNewestLedgerInfo(*db, j_); + auto const res = detail::getNewestLedgerInfo(*db, j_); + + if (res.has_value()) + return res; } if (shardStoreExists()) @@ -816,7 +837,7 @@ RelationalDBInterfaceSqliteImp::getNewestLedgerInfo() std::optional res; iterateLedgerBack( {}, [&](soci::session& session, std::uint32_t shardIndex) { - if (auto info = ripple::getNewestLedgerInfo(session, j_)) + if (auto info = detail::getNewestLedgerInfo(session, j_)) { res = info; return false; @@ -831,13 +852,16 @@ RelationalDBInterfaceSqliteImp::getNewestLedgerInfo() } std::optional -RelationalDBInterfaceSqliteImp::getLimitedOldestLedgerInfo( - LedgerIndex ledgerFirstIndex) +SQLiteDatabaseImp::getLimitedOldestLedgerInfo(LedgerIndex ledgerFirstIndex) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getLimitedOldestLedgerInfo(*db, ledgerFirstIndex, j_); + auto const res = + detail::getLimitedOldestLedgerInfo(*db, ledgerFirstIndex, j_); + + if (res.has_value()) + return res; } if (shardStoreExists()) @@ -846,7 +870,7 @@ RelationalDBInterfaceSqliteImp::getLimitedOldestLedgerInfo( iterateLedgerForward( seqToShardIndex(ledgerFirstIndex), [&](soci::session& session, std::uint32_t shardIndex) { - if (auto info = ripple::getLimitedOldestLedgerInfo( + if (auto info = detail::getLimitedOldestLedgerInfo( session, ledgerFirstIndex, j_)) { res = info; @@ -862,13 +886,16 @@ RelationalDBInterfaceSqliteImp::getLimitedOldestLedgerInfo( } std::optional -RelationalDBInterfaceSqliteImp::getLimitedNewestLedgerInfo( - LedgerIndex ledgerFirstIndex) +SQLiteDatabaseImp::getLimitedNewestLedgerInfo(LedgerIndex ledgerFirstIndex) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getLimitedNewestLedgerInfo(*db, ledgerFirstIndex, j_); + auto const res = + detail::getLimitedNewestLedgerInfo(*db, ledgerFirstIndex, j_); + + if (res.has_value()) + return res; } if (shardStoreExists()) @@ -876,7 +903,7 @@ RelationalDBInterfaceSqliteImp::getLimitedNewestLedgerInfo( std::optional res; iterateLedgerBack( {}, [&](soci::session& session, std::uint32_t shardIndex) { - if (auto info = ripple::getLimitedNewestLedgerInfo( + if (auto info = detail::getLimitedNewestLedgerInfo( session, ledgerFirstIndex, j_)) { res = info; @@ -892,12 +919,15 @@ RelationalDBInterfaceSqliteImp::getLimitedNewestLedgerInfo( } std::optional -RelationalDBInterfaceSqliteImp::getLedgerInfoByHash(uint256 
const& ledgerHash) +SQLiteDatabaseImp::getLedgerInfoByHash(uint256 const& ledgerHash) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getLedgerInfoByHash(*db, ledgerHash, j_); + auto const res = detail::getLedgerInfoByHash(*db, ledgerHash, j_); + + if (res.has_value()) + return res; } if (auto shardStore = app_.getShardStore()) @@ -906,11 +936,11 @@ RelationalDBInterfaceSqliteImp::getLedgerInfoByHash(uint256 const& ledgerHash) auto lgrMetaSession = lgrMetaDB_->checkoutDb(); if (auto const shardIndex = - ripple::getShardIndexforLedger(*lgrMetaSession, ledgerHash)) + detail::getShardIndexforLedger(*lgrMetaSession, ledgerHash)) { shardStore->callForLedgerSQLByShardIndex( *shardIndex, [&](soci::session& session) { - res = ripple::getLedgerInfoByHash(session, ledgerHash, j_); + res = detail::getLedgerInfoByHash(session, ledgerHash, j_); return false; // unused }); } @@ -922,19 +952,22 @@ RelationalDBInterfaceSqliteImp::getLedgerInfoByHash(uint256 const& ledgerHash) } uint256 -RelationalDBInterfaceSqliteImp::getHashByIndex(LedgerIndex ledgerIndex) +SQLiteDatabaseImp::getHashByIndex(LedgerIndex ledgerIndex) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getHashByIndex(*db, ledgerIndex); + auto const res = detail::getHashByIndex(*db, ledgerIndex); + + if (res.isNonZero()) + return res; } if (shardStoreExists()) { uint256 hash; doLedger(ledgerIndex, [&](soci::session& session) { - hash = ripple::getHashByIndex(session, ledgerIndex); + hash = detail::getHashByIndex(session, ledgerIndex); return true; }); return hash; @@ -944,19 +977,22 @@ RelationalDBInterfaceSqliteImp::getHashByIndex(LedgerIndex ledgerIndex) } std::optional -RelationalDBInterfaceSqliteImp::getHashesByIndex(LedgerIndex ledgerIndex) +SQLiteDatabaseImp::getHashesByIndex(LedgerIndex ledgerIndex) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getHashesByIndex(*db, ledgerIndex, j_); + auto const res = detail::getHashesByIndex(*db, ledgerIndex, j_); + + if (res.has_value()) + return res; } if (shardStoreExists()) { std::optional res; doLedger(ledgerIndex, [&](soci::session& session) { - res = ripple::getHashesByIndex(session, ledgerIndex, j_); + res = detail::getHashesByIndex(session, ledgerIndex, j_); return true; }); return res; @@ -966,14 +1002,15 @@ RelationalDBInterfaceSqliteImp::getHashesByIndex(LedgerIndex ledgerIndex) } std::map -RelationalDBInterfaceSqliteImp::getHashesByIndex( - LedgerIndex minSeq, - LedgerIndex maxSeq) +SQLiteDatabaseImp::getHashesByIndex(LedgerIndex minSeq, LedgerIndex maxSeq) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::getHashesByIndex(*db, minSeq, maxSeq, j_); + auto const res = detail::getHashesByIndex(*db, minSeq, maxSeq, j_); + + if (!res.empty()) + return res; } if (shardStoreExists()) @@ -986,7 +1023,7 @@ RelationalDBInterfaceSqliteImp::getHashesByIndex( shardMaxSeq = maxSeq; doLedger(minSeq, [&](soci::session& session) { auto r = - ripple::getHashesByIndex(session, minSeq, shardMaxSeq, j_); + detail::getHashesByIndex(session, minSeq, shardMaxSeq, j_); res.insert(r.begin(), r.end()); return true; }); @@ -1000,7 +1037,7 @@ RelationalDBInterfaceSqliteImp::getHashesByIndex( } std::vector> -RelationalDBInterfaceSqliteImp::getTxHistory(LedgerIndex startIndex) +SQLiteDatabaseImp::getTxHistory(LedgerIndex startIndex) { if (!useTxTables_) return {}; @@ -1008,7 +1045,11 @@ RelationalDBInterfaceSqliteImp::getTxHistory(LedgerIndex startIndex) if (existsTransaction()) { auto db = checkoutTransaction(); - return 
ripple::getTxHistory(*db, app_, startIndex, 20, false).first; + auto const res = + detail::getTxHistory(*db, app_, startIndex, 20, false).first; + + if (!res.empty()) + return res; } if (shardStoreExists()) @@ -1017,7 +1058,7 @@ RelationalDBInterfaceSqliteImp::getTxHistory(LedgerIndex startIndex) int quantity = 20; iterateTransactionBack( {}, [&](soci::session& session, std::uint32_t shardIndex) { - auto [tx, total] = ripple::getTxHistory( + auto [tx, total] = detail::getTxHistory( session, app_, startIndex, quantity, true); txs.insert(txs.end(), tx.begin(), tx.end()); if (total > 0) @@ -1040,9 +1081,8 @@ RelationalDBInterfaceSqliteImp::getTxHistory(LedgerIndex startIndex) return {}; } -RelationalDBInterface::AccountTxs -RelationalDBInterfaceSqliteImp::getOldestAccountTxs( - AccountTxOptions const& options) +RelationalDatabase::AccountTxs +SQLiteDatabaseImp::getOldestAccountTxs(AccountTxOptions const& options) { if (!useTxTables_) return {}; @@ -1052,7 +1092,7 @@ RelationalDBInterfaceSqliteImp::getOldestAccountTxs( if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getOldestAccountTxs( + return detail::getOldestAccountTxs( *db, app_, ledgerMaster, options, {}, j_) .first; } @@ -1069,7 +1109,7 @@ RelationalDBInterfaceSqliteImp::getOldestAccountTxs( if (opt.maxLedger && shardIndex > seqToShardIndex(opt.maxLedger)) return false; - auto [r, total] = ripple::getOldestAccountTxs( + auto [r, total] = detail::getOldestAccountTxs( session, app_, ledgerMaster, opt, limit_used, j_); ret.insert(ret.end(), r.begin(), r.end()); if (!total) @@ -1101,9 +1141,8 @@ RelationalDBInterfaceSqliteImp::getOldestAccountTxs( return {}; } -RelationalDBInterface::AccountTxs -RelationalDBInterfaceSqliteImp::getNewestAccountTxs( - AccountTxOptions const& options) +RelationalDatabase::AccountTxs +SQLiteDatabaseImp::getNewestAccountTxs(AccountTxOptions const& options) { if (!useTxTables_) return {}; @@ -1113,7 +1152,7 @@ RelationalDBInterfaceSqliteImp::getNewestAccountTxs( if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getNewestAccountTxs( + return detail::getNewestAccountTxs( *db, app_, ledgerMaster, options, {}, j_) .first; } @@ -1130,7 +1169,7 @@ RelationalDBInterfaceSqliteImp::getNewestAccountTxs( if (opt.minLedger && shardIndex < seqToShardIndex(opt.minLedger)) return false; - auto [r, total] = ripple::getNewestAccountTxs( + auto [r, total] = detail::getNewestAccountTxs( session, app_, ledgerMaster, opt, limit_used, j_); ret.insert(ret.end(), r.begin(), r.end()); if (!total) @@ -1162,9 +1201,8 @@ RelationalDBInterfaceSqliteImp::getNewestAccountTxs( return {}; } -RelationalDBInterface::MetaTxsList -RelationalDBInterfaceSqliteImp::getOldestAccountTxsB( - AccountTxOptions const& options) +RelationalDatabase::MetaTxsList +SQLiteDatabaseImp::getOldestAccountTxsB(AccountTxOptions const& options) { if (!useTxTables_) return {}; @@ -1172,7 +1210,7 @@ RelationalDBInterfaceSqliteImp::getOldestAccountTxsB( if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getOldestAccountTxsB(*db, app_, options, {}, j_).first; + return detail::getOldestAccountTxsB(*db, app_, options, {}, j_).first; } if (shardStoreExists()) @@ -1187,7 +1225,7 @@ RelationalDBInterfaceSqliteImp::getOldestAccountTxsB( if (opt.maxLedger && shardIndex > seqToShardIndex(opt.maxLedger)) return false; - auto [r, total] = ripple::getOldestAccountTxsB( + auto [r, total] = detail::getOldestAccountTxsB( session, app_, opt, limit_used, j_); ret.insert(ret.end(), r.begin(), r.end()); if 
(!total) @@ -1219,9 +1257,8 @@ RelationalDBInterfaceSqliteImp::getOldestAccountTxsB( return {}; } -RelationalDBInterface::MetaTxsList -RelationalDBInterfaceSqliteImp::getNewestAccountTxsB( - AccountTxOptions const& options) +RelationalDatabase::MetaTxsList +SQLiteDatabaseImp::getNewestAccountTxsB(AccountTxOptions const& options) { if (!useTxTables_) return {}; @@ -1229,7 +1266,7 @@ RelationalDBInterfaceSqliteImp::getNewestAccountTxsB( if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getNewestAccountTxsB(*db, app_, options, {}, j_).first; + return detail::getNewestAccountTxsB(*db, app_, options, {}, j_).first; } if (shardStoreExists()) @@ -1244,7 +1281,7 @@ RelationalDBInterfaceSqliteImp::getNewestAccountTxsB( if (opt.minLedger && shardIndex < seqToShardIndex(opt.minLedger)) return false; - auto [r, total] = ripple::getNewestAccountTxsB( + auto [r, total] = detail::getNewestAccountTxsB( session, app_, opt, limit_used, j_); ret.insert(ret.end(), r.begin(), r.end()); if (!total) @@ -1277,10 +1314,9 @@ RelationalDBInterfaceSqliteImp::getNewestAccountTxsB( } std::pair< - RelationalDBInterface::AccountTxs, - std::optional> -RelationalDBInterfaceSqliteImp::oldestAccountTxPage( - AccountTxPageOptions const& options) + RelationalDatabase::AccountTxs, + std::optional> +SQLiteDatabaseImp::oldestAccountTxPage(AccountTxPageOptions const& options) { if (!useTxTables_) return {}; @@ -1302,7 +1338,7 @@ RelationalDBInterfaceSqliteImp::oldestAccountTxPage( if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = ripple::oldestAccountTxPage( + auto newmarker = detail::oldestAccountTxPage( *db, idCache, onUnsavedLedger, @@ -1325,7 +1361,7 @@ RelationalDBInterfaceSqliteImp::oldestAccountTxPage( if (opt.maxLedger != UINT32_MAX && shardIndex > seqToShardIndex(opt.minLedger)) return false; - auto [marker, total] = ripple::oldestAccountTxPage( + auto [marker, total] = detail::oldestAccountTxPage( session, idCache, onUnsavedLedger, @@ -1347,10 +1383,9 @@ RelationalDBInterfaceSqliteImp::oldestAccountTxPage( } std::pair< - RelationalDBInterface::AccountTxs, - std::optional> -RelationalDBInterfaceSqliteImp::newestAccountTxPage( - AccountTxPageOptions const& options) + RelationalDatabase::AccountTxs, + std::optional> +SQLiteDatabaseImp::newestAccountTxPage(AccountTxPageOptions const& options) { if (!useTxTables_) return {}; @@ -1372,7 +1407,7 @@ RelationalDBInterfaceSqliteImp::newestAccountTxPage( if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = ripple::newestAccountTxPage( + auto newmarker = detail::newestAccountTxPage( *db, idCache, onUnsavedLedger, @@ -1395,7 +1430,7 @@ RelationalDBInterfaceSqliteImp::newestAccountTxPage( if (opt.minLedger && shardIndex < seqToShardIndex(opt.minLedger)) return false; - auto [marker, total] = ripple::newestAccountTxPage( + auto [marker, total] = detail::newestAccountTxPage( session, idCache, onUnsavedLedger, @@ -1417,10 +1452,9 @@ RelationalDBInterfaceSqliteImp::newestAccountTxPage( } std::pair< - RelationalDBInterface::MetaTxsList, - std::optional> -RelationalDBInterfaceSqliteImp::oldestAccountTxPageB( - AccountTxPageOptions const& options) + RelationalDatabase::MetaTxsList, + std::optional> +SQLiteDatabaseImp::oldestAccountTxPageB(AccountTxPageOptions const& options) { if (!useTxTables_) return {}; @@ -1441,7 +1475,7 @@ RelationalDBInterfaceSqliteImp::oldestAccountTxPageB( if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = ripple::oldestAccountTxPage( + auto newmarker = 
detail::oldestAccountTxPage( *db, idCache, onUnsavedLedger, @@ -1464,7 +1498,7 @@ RelationalDBInterfaceSqliteImp::oldestAccountTxPageB( if (opt.maxLedger != UINT32_MAX && shardIndex > seqToShardIndex(opt.minLedger)) return false; - auto [marker, total] = ripple::oldestAccountTxPage( + auto [marker, total] = detail::oldestAccountTxPage( session, idCache, onUnsavedLedger, @@ -1486,10 +1520,9 @@ RelationalDBInterfaceSqliteImp::oldestAccountTxPageB( } std::pair< - RelationalDBInterface::MetaTxsList, - std::optional> -RelationalDBInterfaceSqliteImp::newestAccountTxPageB( - AccountTxPageOptions const& options) + RelationalDatabase::MetaTxsList, + std::optional> +SQLiteDatabaseImp::newestAccountTxPageB(AccountTxPageOptions const& options) { if (!useTxTables_) return {}; @@ -1510,7 +1543,7 @@ RelationalDBInterfaceSqliteImp::newestAccountTxPageB( if (existsTransaction()) { auto db = checkoutTransaction(); - auto newmarker = ripple::newestAccountTxPage( + auto newmarker = detail::newestAccountTxPage( *db, idCache, onUnsavedLedger, @@ -1533,7 +1566,7 @@ RelationalDBInterfaceSqliteImp::newestAccountTxPageB( if (opt.minLedger && shardIndex < seqToShardIndex(opt.minLedger)) return false; - auto [marker, total] = ripple::newestAccountTxPage( + auto [marker, total] = detail::newestAccountTxPage( session, idCache, onUnsavedLedger, @@ -1554,8 +1587,8 @@ RelationalDBInterfaceSqliteImp::newestAccountTxPageB( return {}; } -std::variant -RelationalDBInterfaceSqliteImp::getTransaction( +std::variant +SQLiteDatabaseImp::getTransaction( uint256 const& id, std::optional> const& range, error_code_i& ec) @@ -1566,7 +1599,7 @@ RelationalDBInterfaceSqliteImp::getTransaction( if (existsTransaction()) { auto db = checkoutTransaction(); - return ripple::getTransaction(*db, app_, id, range, ec); + return detail::getTransaction(*db, app_, id, range, ec); } if (auto shardStore = app_.getShardStore(); shardStore) @@ -1575,7 +1608,7 @@ RelationalDBInterfaceSqliteImp::getTransaction( auto txMetaSession = txMetaDB_->checkoutDb(); if (auto const shardIndex = - ripple::getShardIndexforTransaction(*txMetaSession, id)) + detail::getShardIndexforTransaction(*txMetaSession, id)) { shardStore->callForTransactionSQLByShardIndex( *shardIndex, [&](soci::session& session) { @@ -1589,7 +1622,7 @@ RelationalDBInterfaceSqliteImp::getTransaction( if (low <= high) range1 = ClosedInterval(low, high); } - res = ripple::getTransaction(session, app_, id, range1, ec); + res = detail::getTransaction(session, app_, id, range1, ec); return res.index() == 1 && std::get(res) != @@ -1604,19 +1637,19 @@ RelationalDBInterfaceSqliteImp::getTransaction( } bool -RelationalDBInterfaceSqliteImp::ledgerDbHasSpace(Config const& config) +SQLiteDatabaseImp::ledgerDbHasSpace(Config const& config) { if (existsLedger()) { auto db = checkoutLedger(); - return ripple::dbHasSpace(*db, config, j_); + return detail::dbHasSpace(*db, config, j_); } if (shardStoreExists()) { return iterateLedgerBack( {}, [&](soci::session& session, std::uint32_t shardIndex) { - return ripple::dbHasSpace(session, config, j_); + return detail::dbHasSpace(session, config, j_); }); } @@ -1624,7 +1657,7 @@ RelationalDBInterfaceSqliteImp::ledgerDbHasSpace(Config const& config) } bool -RelationalDBInterfaceSqliteImp::transactionDbHasSpace(Config const& config) +SQLiteDatabaseImp::transactionDbHasSpace(Config const& config) { if (!useTxTables_) return true; @@ -1632,14 +1665,14 @@ RelationalDBInterfaceSqliteImp::transactionDbHasSpace(Config const& config) if (existsTransaction()) { auto db = 
checkoutTransaction(); - return ripple::dbHasSpace(*db, config, j_); + return detail::dbHasSpace(*db, config, j_); } if (shardStoreExists()) { return iterateTransactionBack( {}, [&](soci::session& session, std::uint32_t shardIndex) { - return ripple::dbHasSpace(session, config, j_); + return detail::dbHasSpace(session, config, j_); }); } @@ -1647,7 +1680,7 @@ RelationalDBInterfaceSqliteImp::transactionDbHasSpace(Config const& config) } std::uint32_t -RelationalDBInterfaceSqliteImp::getKBUsedAll() +SQLiteDatabaseImp::getKBUsedAll() { if (existsLedger()) { @@ -1669,7 +1702,7 @@ RelationalDBInterfaceSqliteImp::getKBUsedAll() } std::uint32_t -RelationalDBInterfaceSqliteImp::getKBUsedLedger() +SQLiteDatabaseImp::getKBUsedLedger() { if (existsLedger()) { @@ -1691,7 +1724,7 @@ RelationalDBInterfaceSqliteImp::getKBUsedLedger() } std::uint32_t -RelationalDBInterfaceSqliteImp::getKBUsedTransaction() +SQLiteDatabaseImp::getKBUsedTransaction() { if (!useTxTables_) return 0; @@ -1716,25 +1749,21 @@ RelationalDBInterfaceSqliteImp::getKBUsedTransaction() } void -RelationalDBInterfaceSqliteImp::closeLedgerDB() +SQLiteDatabaseImp::closeLedgerDB() { lgrdb_.reset(); } void -RelationalDBInterfaceSqliteImp::closeTransactionDB() +SQLiteDatabaseImp::closeTransactionDB() { txdb_.reset(); } -std::unique_ptr -getRelationalDBInterfaceSqlite( - Application& app, - Config const& config, - JobQueue& jobQueue) +std::unique_ptr +getSQLiteDatabase(Application& app, Config const& config, JobQueue& jobQueue) { - return std::make_unique( - app, config, jobQueue); + return std::make_unique(app, config, jobQueue); } } // namespace ripple diff --git a/src/ripple/app/rdb/impl/Download.cpp b/src/ripple/app/rdb/impl/Download.cpp new file mode 100644 index 00000000000..0905ee577b1 --- /dev/null +++ b/src/ripple/app/rdb/impl/Download.cpp @@ -0,0 +1,152 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace ripple { + +std::pair, std::optional> +openDatabaseBodyDb( + DatabaseCon::Setup const& setup, + boost::filesystem::path const& path) +{ + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional pathFromDb; + boost::optional size; + + auto conn = std::make_unique( + setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); + + auto& session = *conn->checkoutDb(); + + session << "SELECT Path FROM Download WHERE Part=0;", + soci::into(pathFromDb); + + // Try to reuse preexisting + // database. + if (pathFromDb) + { + // Can't reuse - database was + // from a different file download.
+ if (pathFromDb != path.string()) + { + session << "DROP TABLE Download;"; + } + + // Continuing a file download. + else + { + session << "SELECT SUM(LENGTH(Data)) FROM Download;", + soci::into(size); + } + } + + return {std::move(conn), (size ? *size : std::optional())}; +} + +std::uint64_t +databaseBodyDoPut( + soci::session& session, + std::string const& data, + std::string const& path, + std::uint64_t fileSize, + std::uint64_t part, + std::uint16_t maxRowSizePad) +{ + std::uint64_t rowSize = 0; + soci::indicator rti; + + std::uint64_t remainingInRow = 0; + + auto be = + dynamic_cast(session.get_backend()); + BOOST_ASSERT(be); + + // This limits how large we can make the blob + // in each row. Also subtract a pad value to + // account for the other values in the row. + auto const blobMaxSize = + sqlite_api::sqlite3_limit(be->conn_, SQLITE_LIMIT_LENGTH, -1) - + maxRowSizePad; + + std::string newpath; + + auto rowInit = [&] { + session << "INSERT INTO Download VALUES (:path, zeroblob(0), 0, :part)", + soci::use(newpath), soci::use(part); + + remainingInRow = blobMaxSize; + rowSize = 0; + }; + + session << "SELECT Path,Size,Part FROM Download ORDER BY Part DESC " + "LIMIT 1", + soci::into(newpath), soci::into(rowSize), soci::into(part, rti); + + if (!session.got_data()) + { + newpath = path; + rowInit(); + } + else + remainingInRow = blobMaxSize - rowSize; + + auto insert = [&session, &rowSize, &part, &fs = fileSize]( + auto const& data) { + std::uint64_t updatedSize = rowSize + data.size(); + + session << "UPDATE Download SET Data = CAST(Data || :data AS blob), " + "Size = :size WHERE Part = :part;", + soci::use(data), soci::use(updatedSize), soci::use(part); + + fs += data.size(); + }; + + size_t currentBase = 0; + + while (currentBase + remainingInRow < data.size()) + { + if (remainingInRow) + { + insert(data.substr(currentBase, remainingInRow)); + currentBase += remainingInRow; + } + + ++part; + rowInit(); + } + + insert(data.substr(currentBase)); + + return part; +} + +void +databaseBodyFinish(soci::session& session, std::ofstream& fout) +{ + soci::rowset rs = + (session.prepare << "SELECT Data FROM Download ORDER BY PART ASC;"); + + // iteration through the resultset: + for (auto it = rs.begin(); it != rs.end(); ++it) + fout.write(it->data(), it->size()); +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/PeerFinder.cpp b/src/ripple/app/rdb/impl/PeerFinder.cpp new file mode 100644 index 00000000000..46dca3760c7 --- /dev/null +++ b/src/ripple/app/rdb/impl/PeerFinder.cpp @@ -0,0 +1,271 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +namespace ripple { + +void +initPeerFinderDB( + soci::session& session, + BasicConfig const& config, + beast::Journal j) +{ + DBConfig m_sociConfig(config, "peerfinder"); + m_sociConfig.open(session); + + JLOG(j.info()) << "Opening database at '" << m_sociConfig.connectionString() + << "'"; + + soci::transaction tr(session); + session << "PRAGMA encoding=\"UTF-8\";"; + + session << "CREATE TABLE IF NOT EXISTS SchemaVersion ( " + " name TEXT PRIMARY KEY, " + " version INTEGER" + ");"; + + session << "CREATE TABLE IF NOT EXISTS PeerFinder_BootstrapCache ( " + " id INTEGER PRIMARY KEY AUTOINCREMENT, " + " address TEXT UNIQUE NOT NULL, " + " valence INTEGER" + ");"; + + session << "CREATE INDEX IF NOT EXISTS " + " PeerFinder_BootstrapCache_Index ON " + "PeerFinder_BootstrapCache " + " ( " + " address " + " ); "; + + tr.commit(); +} + +void +updatePeerFinderDB( + soci::session& session, + int currentSchemaVersion, + beast::Journal j) +{ + soci::transaction tr(session); + // get version + int version(0); + { + // SOCI requires a boost::optional (not std::optional) parameter. + boost::optional vO; + session << "SELECT " + " version " + "FROM SchemaVersion WHERE " + " name = 'PeerFinder';", + soci::into(vO); + + version = vO.value_or(0); + + JLOG(j.info()) << "Opened version " << version << " database"; + } + + { + if (version < currentSchemaVersion) + { + JLOG(j.info()) << "Updating database to version " + << currentSchemaVersion; + } + else if (version > currentSchemaVersion) + { + Throw( + "The PeerFinder database version is higher than expected"); + } + } + + if (version < 4) + { + // + // Remove the "uptime" column from the bootstrap table + // + + session << "CREATE TABLE IF NOT EXISTS " + "PeerFinder_BootstrapCache_Next ( " + " id INTEGER PRIMARY KEY AUTOINCREMENT, " + " address TEXT UNIQUE NOT NULL, " + " valence INTEGER" + ");"; + + session << "CREATE INDEX IF NOT EXISTS " + " PeerFinder_BootstrapCache_Next_Index ON " + " PeerFinder_BootstrapCache_Next " + " ( address ); "; + + std::size_t count; + session << "SELECT COUNT(*) FROM PeerFinder_BootstrapCache;", + soci::into(count); + + std::vector list; + + { + list.reserve(count); + std::string s; + int valence; + soci::statement st = + (session.prepare << "SELECT " + " address, " + " valence " + "FROM PeerFinder_BootstrapCache;", + soci::into(s), + soci::into(valence)); + + st.execute(); + while (st.fetch()) + { + PeerFinder::Store::Entry entry; + entry.endpoint = beast::IP::Endpoint::from_string(s); + if (!is_unspecified(entry.endpoint)) + { + entry.valence = valence; + list.push_back(entry); + } + else + { + JLOG(j.error()) << "Bad address string '" << s + << "' in Bootcache table"; + } + } + } + + if (!list.empty()) + { + std::vector s; + std::vector valence; + s.reserve(list.size()); + valence.reserve(list.size()); + + for (auto iter(list.cbegin()); iter != list.cend(); ++iter) + { + s.emplace_back(to_string(iter->endpoint)); + valence.emplace_back(iter->valence); + } + + session << "INSERT INTO PeerFinder_BootstrapCache_Next ( " + " address, " + " valence " + ") VALUES ( " + " :s, :valence" + ");", + soci::use(s), soci::use(valence); + } + + session << "DROP TABLE IF EXISTS PeerFinder_BootstrapCache;"; + + session << "DROP INDEX IF EXISTS PeerFinder_BootstrapCache_Index;"; + + session << "ALTER TABLE PeerFinder_BootstrapCache_Next " + " RENAME TO PeerFinder_BootstrapCache;"; + + session << "CREATE INDEX IF NOT EXISTS " + " 
PeerFinder_BootstrapCache_Index ON " + "PeerFinder_BootstrapCache " + " ( " + " address " + " ); "; + } + + if (version < 3) + { + // + // Remove legacy endpoints from the schema + // + + session << "DROP TABLE IF EXISTS LegacyEndpoints;"; + + session << "DROP TABLE IF EXISTS PeerFinderLegacyEndpoints;"; + + session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints;"; + + session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints_Index;"; + } + + { + int const v(currentSchemaVersion); + session << "INSERT OR REPLACE INTO SchemaVersion (" + " name " + " ,version " + ") VALUES ( " + " 'PeerFinder', :version " + ");", + soci::use(v); + } + + tr.commit(); +} + +void +readPeerFinderDB( + soci::session& session, + std::function const& func) +{ + std::string s; + int valence; + soci::statement st = + (session.prepare << "SELECT " + " address, " + " valence " + "FROM PeerFinder_BootstrapCache;", + soci::into(s), + soci::into(valence)); + + st.execute(); + while (st.fetch()) + { + func(s, valence); + } +} + +void +savePeerFinderDB( + soci::session& session, + std::vector const& v) +{ + soci::transaction tr(session); + session << "DELETE FROM PeerFinder_BootstrapCache;"; + + if (!v.empty()) + { + std::vector s; + std::vector valence; + s.reserve(v.size()); + valence.reserve(v.size()); + + for (auto const& e : v) + { + s.emplace_back(to_string(e.endpoint)); + valence.emplace_back(e.valence); + } + + session << "INSERT INTO PeerFinder_BootstrapCache ( " + " address, " + " valence " + ") VALUES ( " + " :s, :valence " + ");", + soci::use(s), soci::use(valence); + } + + tr.commit(); +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp b/src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp deleted file mode 100644 index 17b86f0cabc..00000000000 --- a/src/ripple/app/rdb/impl/RelationalDBInterface_global.cpp +++ /dev/null @@ -1,836 +0,0 @@ -//------------------------------------------------------------------------------ -/* - This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted, provided that the above - copyright notice and this permission notice appear in all copies. - - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
-*/ -//============================================================================== - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace ripple { - -/* Wallet DB */ - -std::unique_ptr -makeWalletDB(DatabaseCon::Setup const& setup) -{ - // wallet database - return std::make_unique( - setup, WalletDBName, std::array(), WalletDBInit); -} - -std::unique_ptr -makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname) -{ - // wallet database - return std::make_unique( - setup, dbname.data(), std::array(), WalletDBInit); -} - -void -getManifests( - soci::session& session, - std::string const& dbTable, - ManifestCache& mCache, - beast::Journal j) -{ - // Load manifests stored in database - std::string const sql = "SELECT RawData FROM " + dbTable + ";"; - soci::blob sociRawData(session); - soci::statement st = (session.prepare << sql, soci::into(sociRawData)); - st.execute(); - while (st.fetch()) - { - std::string serialized; - convert(sociRawData, serialized); - if (auto mo = deserializeManifest(serialized)) - { - if (!mo->verify()) - { - JLOG(j.warn()) << "Unverifiable manifest in db"; - continue; - } - - mCache.applyManifest(std::move(*mo)); - } - else - { - JLOG(j.warn()) << "Malformed manifest in database"; - } - } -} - -static void -saveManifest( - soci::session& session, - std::string const& dbTable, - std::string const& serialized) -{ - // soci does not support bulk insertion of blob data - // Do not reuse blob because manifest ecdsa signatures vary in length - // but blob write length is expected to be >= the last write - soci::blob rawData(session); - convert(serialized, rawData); - session << "INSERT INTO " << dbTable << " (RawData) VALUES (:rawData);", - soci::use(rawData); -} - -void -saveManifests( - soci::session& session, - std::string const& dbTable, - std::function const& isTrusted, - hash_map const& map, - beast::Journal j) -{ - soci::transaction tr(session); - session << "DELETE FROM " << dbTable; - for (auto const& v : map) - { - // Save all revocation manifests, - // but only save trusted non-revocation manifests. - if (!v.second.revoked() && !isTrusted(v.second.masterKey)) - { - JLOG(j.info()) << "Untrusted manifest in cache not saved to db"; - continue; - } - - saveManifest(session, dbTable, v.second.serialized); - } - tr.commit(); -} - -void -addValidatorManifest(soci::session& session, std::string const& serialized) -{ - soci::transaction tr(session); - saveManifest(session, "ValidatorManifests", serialized); - tr.commit(); -} - -std::pair -getNodeIdentity(soci::session& session) -{ - { - // SOCI requires boost::optional (not std::optional) as the parameter. 
- boost::optional pubKO, priKO; - soci::statement st = - (session.prepare - << "SELECT PublicKey, PrivateKey FROM NodeIdentity;", - soci::into(pubKO), - soci::into(priKO)); - st.execute(); - while (st.fetch()) - { - auto const sk = parseBase58( - TokenType::NodePrivate, priKO.value_or("")); - auto const pk = parseBase58( - TokenType::NodePublic, pubKO.value_or("")); - - // Only use if the public and secret keys are a pair - if (sk && pk && (*pk == derivePublicKey(KeyType::secp256k1, *sk))) - return {*pk, *sk}; - } - } - - // If a valid identity wasn't found, we randomly generate a new one: - auto [newpublicKey, newsecretKey] = randomKeyPair(KeyType::secp256k1); - - session << str( - boost::format("INSERT INTO NodeIdentity (PublicKey,PrivateKey) " - "VALUES ('%s','%s');") % - toBase58(TokenType::NodePublic, newpublicKey) % - toBase58(TokenType::NodePrivate, newsecretKey)); - - return {newpublicKey, newsecretKey}; -} - -std::unordered_set, KeyEqual> -getPeerReservationTable(soci::session& session, beast::Journal j) -{ - std::unordered_set, KeyEqual> table; - // These values must be boost::optionals (not std) because SOCI expects - // boost::optionals. - boost::optional valPubKey, valDesc; - // We should really abstract the table and column names into constants, - // but no one else does. Because it is too tedious? It would be easy if we - // had a jOOQ for C++. - soci::statement st = - (session.prepare - << "SELECT PublicKey, Description FROM PeerReservations;", - soci::into(valPubKey), - soci::into(valDesc)); - st.execute(); - while (st.fetch()) - { - if (!valPubKey || !valDesc) - { - // This represents a `NULL` in a `NOT NULL` column. It should be - // unreachable. - continue; - } - auto const optNodeId = - parseBase58(TokenType::NodePublic, *valPubKey); - if (!optNodeId) - { - JLOG(j.warn()) << "load: not a public key: " << valPubKey; - continue; - } - table.insert(PeerReservation{*optNodeId, *valDesc}); - } - - return table; -} - -void -insertPeerReservation( - soci::session& session, - PublicKey const& nodeId, - std::string const& description) -{ - session << "INSERT INTO PeerReservations (PublicKey, Description) " - "VALUES (:nodeId, :desc) " - "ON CONFLICT (PublicKey) DO UPDATE SET " - "Description=excluded.Description", - soci::use(toBase58(TokenType::NodePublic, nodeId)), - soci::use(description); -} - -void -deletePeerReservation(soci::session& session, PublicKey const& nodeId) -{ - session << "DELETE FROM PeerReservations WHERE PublicKey = :nodeId", - soci::use(toBase58(TokenType::NodePublic, nodeId)); -} - -bool -createFeatureVotes(soci::session& session) -{ - soci::transaction tr(session); - std::string sql = - "SELECT count(*) FROM sqlite_master " - "WHERE type='table' AND name='FeatureVotes'"; - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional featureVotesCount; - session << sql, soci::into(featureVotesCount); - bool exists = static_cast(*featureVotesCount); - - // Create FeatureVotes table in WalletDB if it doesn't exist - if (!exists) - { - session << "CREATE TABLE FeatureVotes ( " - "AmendmentHash CHARACTER(64) NOT NULL, " - "AmendmentName TEXT, " - "Veto INTEGER NOT NULL );"; - tr.commit(); - } - return exists; -} - -void -readAmendments( - soci::session& session, - std::function amendment_hash, - boost::optional amendment_name, - boost::optional vote)> const& callback) -{ - // lambda that converts the internally stored int to an AmendmentVote. 
- auto intToVote = [](boost::optional const& dbVote) - -> boost::optional { - return safe_cast(dbVote.value_or(1)); - }; - - soci::transaction tr(session); - std::string sql = - "SELECT AmendmentHash, AmendmentName, Veto FROM FeatureVotes"; - // SOCI requires boost::optional (not std::optional) as parameters. - boost::optional amendment_hash; - boost::optional amendment_name; - boost::optional vote_to_veto; - soci::statement st = - (session.prepare << sql, - soci::into(amendment_hash), - soci::into(amendment_name), - soci::into(vote_to_veto)); - st.execute(); - while (st.fetch()) - { - callback(amendment_hash, amendment_name, intToVote(vote_to_veto)); - } -} - -void -voteAmendment( - soci::session& session, - uint256 const& amendment, - std::string const& name, - AmendmentVote vote) -{ - soci::transaction tr(session); - std::string sql = - "INSERT INTO FeatureVotes (AmendmentHash, AmendmentName, Veto) VALUES " - "('"; - sql += to_string(amendment); - sql += "', '" + name; - sql += "', '" + std::to_string(safe_cast(vote)) + "');"; - session << sql; - tr.commit(); -} - -/* State DB */ - -void -initStateDB( - soci::session& session, - BasicConfig const& config, - std::string const& dbName) -{ - open(session, config, dbName); - - session << "PRAGMA synchronous=FULL;"; - - session << "CREATE TABLE IF NOT EXISTS DbState (" - " Key INTEGER PRIMARY KEY," - " WritableDb TEXT," - " ArchiveDb TEXT," - " LastRotatedLedger INTEGER" - ");"; - - session << "CREATE TABLE IF NOT EXISTS CanDelete (" - " Key INTEGER PRIMARY KEY," - " CanDeleteSeq INTEGER" - ");"; - - std::int64_t count = 0; - { - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional countO; - session << "SELECT COUNT(Key) FROM DbState WHERE Key = 1;", - soci::into(countO); - if (!countO) - Throw( - "Failed to fetch Key Count from DbState."); - count = *countO; - } - - if (!count) - { - session << "INSERT INTO DbState VALUES (1, '', '', 0);"; - } - - { - // SOCI requires boost::optional (not std::optional) as the parameter. 
- boost::optional countO; - session << "SELECT COUNT(Key) FROM CanDelete WHERE Key = 1;", - soci::into(countO); - if (!countO) - Throw( - "Failed to fetch Key Count from CanDelete."); - count = *countO; - } - - if (!count) - { - session << "INSERT INTO CanDelete VALUES (1, 0);"; - } -} - -LedgerIndex -getCanDelete(soci::session& session) -{ - LedgerIndex seq; - session << "SELECT CanDeleteSeq FROM CanDelete WHERE Key = 1;", - soci::into(seq); - ; - return seq; -} - -LedgerIndex -setCanDelete(soci::session& session, LedgerIndex canDelete) -{ - session << "UPDATE CanDelete SET CanDeleteSeq = :canDelete WHERE Key = 1;", - soci::use(canDelete); - return canDelete; -} - -SavedState -getSavedState(soci::session& session) -{ - SavedState state; - session << "SELECT WritableDb, ArchiveDb, LastRotatedLedger" - " FROM DbState WHERE Key = 1;", - soci::into(state.writableDb), soci::into(state.archiveDb), - soci::into(state.lastRotated); - - return state; -} - -void -setSavedState(soci::session& session, SavedState const& state) -{ - session << "UPDATE DbState" - " SET WritableDb = :writableDb," - " ArchiveDb = :archiveDb," - " LastRotatedLedger = :lastRotated" - " WHERE Key = 1;", - soci::use(state.writableDb), soci::use(state.archiveDb), - soci::use(state.lastRotated); -} - -void -setLastRotated(soci::session& session, LedgerIndex seq) -{ - session << "UPDATE DbState SET LastRotatedLedger = :seq" - " WHERE Key = 1;", - soci::use(seq); -} - -/* DatabaseBody DB */ - -std::pair, std::optional> -openDatabaseBodyDb( - DatabaseCon::Setup const& setup, - boost::filesystem::path const& path) -{ - // SOCI requires boost::optional (not std::optional) as the parameter. - boost::optional pathFromDb; - boost::optional size; - - auto conn = std::make_unique( - setup, "Download", DownloaderDBPragma, DatabaseBodyDBInit); - - auto& session = *conn->checkoutDb(); - - session << "SELECT Path FROM Download WHERE Part=0;", - soci::into(pathFromDb); - - // Try to reuse preexisting - // database. - if (pathFromDb) - { - // Can't resuse - database was - // from a different file download. - if (pathFromDb != path.string()) - { - session << "DROP TABLE Download;"; - } - - // Continuing a file download. - else - { - session << "SELECT SUM(LENGTH(Data)) FROM Download;", - soci::into(size); - } - } - - return {std::move(conn), (size ? *size : std::optional())}; -} - -std::uint64_t -databaseBodyDoPut( - soci::session& session, - std::string const& data, - std::string const& path, - std::uint64_t fileSize, - std::uint64_t part, - std::uint16_t maxRowSizePad) -{ - std::uint64_t rowSize = 0; - soci::indicator rti; - - std::uint64_t remainingInRow = 0; - - auto be = - dynamic_cast(session.get_backend()); - BOOST_ASSERT(be); - - // This limits how large we can make the blob - // in each row. Also subtract a pad value to - // account for the other values in the row. 
- auto const blobMaxSize = - sqlite_api::sqlite3_limit(be->conn_, SQLITE_LIMIT_LENGTH, -1) - - maxRowSizePad; - - std::string newpath; - - auto rowInit = [&] { - session << "INSERT INTO Download VALUES (:path, zeroblob(0), 0, :part)", - soci::use(newpath), soci::use(part); - - remainingInRow = blobMaxSize; - rowSize = 0; - }; - - session << "SELECT Path,Size,Part FROM Download ORDER BY Part DESC " - "LIMIT 1", - soci::into(newpath), soci::into(rowSize), soci::into(part, rti); - - if (!session.got_data()) - { - newpath = path; - rowInit(); - } - else - remainingInRow = blobMaxSize - rowSize; - - auto insert = [&session, &rowSize, &part, &fs = fileSize]( - auto const& data) { - std::uint64_t updatedSize = rowSize + data.size(); - - session << "UPDATE Download SET Data = CAST(Data || :data AS blob), " - "Size = :size WHERE Part = :part;", - soci::use(data), soci::use(updatedSize), soci::use(part); - - fs += data.size(); - }; - - size_t currentBase = 0; - - while (currentBase + remainingInRow < data.size()) - { - if (remainingInRow) - { - insert(data.substr(currentBase, remainingInRow)); - currentBase += remainingInRow; - } - - ++part; - rowInit(); - } - - insert(data.substr(currentBase)); - - return part; -} - -void -databaseBodyFinish(soci::session& session, std::ofstream& fout) -{ - soci::rowset rs = - (session.prepare << "SELECT Data FROM Download ORDER BY PART ASC;"); - - // iteration through the resultset: - for (auto it = rs.begin(); it != rs.end(); ++it) - fout.write(it->data(), it->size()); -} - -/* Vacuum DB */ - -bool -doVacuumDB(DatabaseCon::Setup const& setup) -{ - boost::filesystem::path dbPath = setup.dataDir / TxDBName; - - uintmax_t const dbSize = file_size(dbPath); - assert(dbSize != static_cast(-1)); - - if (auto available = space(dbPath.parent_path()).available; - available < dbSize) - { - std::cerr << "The database filesystem must have at least as " - "much free space as the size of " - << dbPath.string() << ", which is " << dbSize - << " bytes. Only " << available << " bytes are available.\n"; - return false; - } - - auto txnDB = - std::make_unique(setup, TxDBName, TxDBPragma, TxDBInit); - auto& session = txnDB->getSession(); - std::uint32_t pageSize; - - // Only the most trivial databases will fit in memory on typical - // (recommended) software. Force temp files to be written to disk - // regardless of the config settings. - session << boost::format(CommonDBPragmaTemp) % "file"; - session << "PRAGMA page_size;", soci::into(pageSize); - - std::cout << "VACUUM beginning. page_size: " << pageSize << std::endl; - - session << "VACUUM;"; - assert(setup.globalPragma); - for (auto const& p : *setup.globalPragma) - session << p; - session << "PRAGMA page_size;", soci::into(pageSize); - - std::cout << "VACUUM finished. 
page_size: " << pageSize << std::endl; - - return true; -} - -/* PeerFinder DB */ - -void -initPeerFinderDB( - soci::session& session, - BasicConfig const& config, - beast::Journal j) -{ - DBConfig m_sociConfig(config, "peerfinder"); - m_sociConfig.open(session); - - JLOG(j.info()) << "Opening database at '" << m_sociConfig.connectionString() - << "'"; - - soci::transaction tr(session); - session << "PRAGMA encoding=\"UTF-8\";"; - - session << "CREATE TABLE IF NOT EXISTS SchemaVersion ( " - " name TEXT PRIMARY KEY, " - " version INTEGER" - ");"; - - session << "CREATE TABLE IF NOT EXISTS PeerFinder_BootstrapCache ( " - " id INTEGER PRIMARY KEY AUTOINCREMENT, " - " address TEXT UNIQUE NOT NULL, " - " valence INTEGER" - ");"; - - session << "CREATE INDEX IF NOT EXISTS " - " PeerFinder_BootstrapCache_Index ON " - "PeerFinder_BootstrapCache " - " ( " - " address " - " ); "; - - tr.commit(); -} - -void -updatePeerFinderDB( - soci::session& session, - int currentSchemaVersion, - beast::Journal j) -{ - soci::transaction tr(session); - // get version - int version(0); - { - // SOCI requires a boost::optional (not std::optional) parameter. - boost::optional vO; - session << "SELECT " - " version " - "FROM SchemaVersion WHERE " - " name = 'PeerFinder';", - soci::into(vO); - - version = vO.value_or(0); - - JLOG(j.info()) << "Opened version " << version << " database"; - } - - { - if (version < currentSchemaVersion) - { - JLOG(j.info()) << "Updating database to version " - << currentSchemaVersion; - } - else if (version > currentSchemaVersion) - { - Throw( - "The PeerFinder database version is higher than expected"); - } - } - - if (version < 4) - { - // - // Remove the "uptime" column from the bootstrap table - // - - session << "CREATE TABLE IF NOT EXISTS " - "PeerFinder_BootstrapCache_Next ( " - " id INTEGER PRIMARY KEY AUTOINCREMENT, " - " address TEXT UNIQUE NOT NULL, " - " valence INTEGER" - ");"; - - session << "CREATE INDEX IF NOT EXISTS " - " PeerFinder_BootstrapCache_Next_Index ON " - " PeerFinder_BootstrapCache_Next " - " ( address ); "; - - std::size_t count; - session << "SELECT COUNT(*) FROM PeerFinder_BootstrapCache;", - soci::into(count); - - std::vector list; - - { - list.reserve(count); - std::string s; - int valence; - soci::statement st = - (session.prepare << "SELECT " - " address, " - " valence " - "FROM PeerFinder_BootstrapCache;", - soci::into(s), - soci::into(valence)); - - st.execute(); - while (st.fetch()) - { - PeerFinder::Store::Entry entry; - entry.endpoint = beast::IP::Endpoint::from_string(s); - if (!is_unspecified(entry.endpoint)) - { - entry.valence = valence; - list.push_back(entry); - } - else - { - JLOG(j.error()) << "Bad address string '" << s - << "' in Bootcache table"; - } - } - } - - if (!list.empty()) - { - std::vector s; - std::vector valence; - s.reserve(list.size()); - valence.reserve(list.size()); - - for (auto iter(list.cbegin()); iter != list.cend(); ++iter) - { - s.emplace_back(to_string(iter->endpoint)); - valence.emplace_back(iter->valence); - } - - session << "INSERT INTO PeerFinder_BootstrapCache_Next ( " - " address, " - " valence " - ") VALUES ( " - " :s, :valence" - ");", - soci::use(s), soci::use(valence); - } - - session << "DROP TABLE IF EXISTS PeerFinder_BootstrapCache;"; - - session << "DROP INDEX IF EXISTS PeerFinder_BootstrapCache_Index;"; - - session << "ALTER TABLE PeerFinder_BootstrapCache_Next " - " RENAME TO PeerFinder_BootstrapCache;"; - - session << "CREATE INDEX IF NOT EXISTS " - " PeerFinder_BootstrapCache_Index ON " - 
"PeerFinder_BootstrapCache " - " ( " - " address " - " ); "; - } - - if (version < 3) - { - // - // Remove legacy endpoints from the schema - // - - session << "DROP TABLE IF EXISTS LegacyEndpoints;"; - - session << "DROP TABLE IF EXISTS PeerFinderLegacyEndpoints;"; - - session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints;"; - - session << "DROP TABLE IF EXISTS PeerFinder_LegacyEndpoints_Index;"; - } - - { - int const v(currentSchemaVersion); - session << "INSERT OR REPLACE INTO SchemaVersion (" - " name " - " ,version " - ") VALUES ( " - " 'PeerFinder', :version " - ");", - soci::use(v); - } - - tr.commit(); -} - -void -readPeerFinderDB( - soci::session& session, - std::function const& func) -{ - std::string s; - int valence; - soci::statement st = - (session.prepare << "SELECT " - " address, " - " valence " - "FROM PeerFinder_BootstrapCache;", - soci::into(s), - soci::into(valence)); - - st.execute(); - while (st.fetch()) - { - func(s, valence); - } -} - -void -savePeerFinderDB( - soci::session& session, - std::vector const& v) -{ - soci::transaction tr(session); - session << "DELETE FROM PeerFinder_BootstrapCache;"; - - if (!v.empty()) - { - std::vector s; - std::vector valence; - s.reserve(v.size()); - valence.reserve(v.size()); - - for (auto const& e : v) - { - s.emplace_back(to_string(e.endpoint)); - valence.emplace_back(e.valence); - } - - session << "INSERT INTO PeerFinder_BootstrapCache ( " - " address, " - " valence " - ") VALUES ( " - " :s, :valence " - ");", - soci::use(s), soci::use(valence); - } - - tr.commit(); -} - -} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface.cpp b/src/ripple/app/rdb/impl/RelationalDatabase.cpp similarity index 76% rename from src/ripple/app/rdb/impl/RelationalDBInterface.cpp rename to src/ripple/app/rdb/impl/RelationalDatabase.cpp index 1ef456bcb5f..8a3ce5b016d 100644 --- a/src/ripple/app/rdb/impl/RelationalDBInterface.cpp +++ b/src/ripple/app/rdb/impl/RelationalDatabase.cpp @@ -18,26 +18,20 @@ //============================================================================== #include -#include +#include #include #include namespace ripple { -extern std::unique_ptr -getRelationalDBInterfaceSqlite( - Application& app, - Config const& config, - JobQueue& jobQueue); +extern std::unique_ptr +getSQLiteDatabase(Application& app, Config const& config, JobQueue& jobQueue); -extern std::unique_ptr -getRelationalDBInterfacePostgres( - Application& app, - Config const& config, - JobQueue& jobQueue); +extern std::unique_ptr +getPostgresDatabase(Application& app, Config const& config, JobQueue& jobQueue); -std::unique_ptr -RelationalDBInterface::init( +std::unique_ptr +RelationalDatabase::init( Application& app, Config const& config, JobQueue& jobQueue) @@ -73,14 +67,14 @@ RelationalDBInterface::init( if (use_sqlite) { - return getRelationalDBInterfaceSqlite(app, config, jobQueue); + return getSQLiteDatabase(app, config, jobQueue); } else if (use_postgres) { - return getRelationalDBInterfacePostgres(app, config, jobQueue); + return getPostgresDatabase(app, config, jobQueue); } - return std::unique_ptr(); + return std::unique_ptr(); } } // namespace ripple diff --git a/src/ripple/app/rdb/impl/ShardArchive.cpp b/src/ripple/app/rdb/impl/ShardArchive.cpp new file mode 100644 index 00000000000..6880aa00136 --- /dev/null +++ b/src/ripple/app/rdb/impl/ShardArchive.cpp @@ -0,0 +1,68 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + 
Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include + +namespace ripple { + +std::unique_ptr +makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName) +{ + return std::make_unique( + dir, dbName, DownloaderDBPragma, ShardArchiveHandlerDBInit); +} + +void +readArchiveDB( + DatabaseCon& db, + std::function const& func) +{ + soci::rowset rs = + (db.getSession().prepare << "SELECT * FROM State;"); + + for (auto it = rs.begin(); it != rs.end(); ++it) + { + func(it->get(1), it->get(0)); + } +} + +void +insertArchiveDB( + DatabaseCon& db, + std::uint32_t shardIndex, + std::string const& url) +{ + db.getSession() << "INSERT INTO State VALUES (:index, :url);", + soci::use(shardIndex), soci::use(url); +} + +void +deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex) +{ + db.getSession() << "DELETE FROM State WHERE ShardIndex = :index;", + soci::use(shardIndex); +} + +void +dropArchiveDB(DatabaseCon& db) +{ + db.getSession() << "DROP TABLE State;"; +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/State.cpp b/src/ripple/app/rdb/impl/State.cpp new file mode 100644 index 00000000000..8f8beb0c7e1 --- /dev/null +++ b/src/ripple/app/rdb/impl/State.cpp @@ -0,0 +1,130 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +namespace ripple { + +void +initStateDB( + soci::session& session, + BasicConfig const& config, + std::string const& dbName) +{ + open(session, config, dbName); + + session << "PRAGMA synchronous=FULL;"; + + session << "CREATE TABLE IF NOT EXISTS DbState (" + " Key INTEGER PRIMARY KEY," + " WritableDb TEXT," + " ArchiveDb TEXT," + " LastRotatedLedger INTEGER" + ");"; + + session << "CREATE TABLE IF NOT EXISTS CanDelete (" + " Key INTEGER PRIMARY KEY," + " CanDeleteSeq INTEGER" + ");"; + + std::int64_t count = 0; + { + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional countO; + session << "SELECT COUNT(Key) FROM DbState WHERE Key = 1;", + soci::into(countO); + if (!countO) + Throw( + "Failed to fetch Key Count from DbState."); + count = *countO; + } + + if (!count) + { + session << "INSERT INTO DbState VALUES (1, '', '', 0);"; + } + + { + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional countO; + session << "SELECT COUNT(Key) FROM CanDelete WHERE Key = 1;", + soci::into(countO); + if (!countO) + Throw( + "Failed to fetch Key Count from CanDelete."); + count = *countO; + } + + if (!count) + { + session << "INSERT INTO CanDelete VALUES (1, 0);"; + } +} + +LedgerIndex +getCanDelete(soci::session& session) +{ + LedgerIndex seq; + session << "SELECT CanDeleteSeq FROM CanDelete WHERE Key = 1;", + soci::into(seq); + ; + return seq; +} + +LedgerIndex +setCanDelete(soci::session& session, LedgerIndex canDelete) +{ + session << "UPDATE CanDelete SET CanDeleteSeq = :canDelete WHERE Key = 1;", + soci::use(canDelete); + return canDelete; +} + +SavedState +getSavedState(soci::session& session) +{ + SavedState state; + session << "SELECT WritableDb, ArchiveDb, LastRotatedLedger" + " FROM DbState WHERE Key = 1;", + soci::into(state.writableDb), soci::into(state.archiveDb), + soci::into(state.lastRotated); + + return state; +} + +void +setSavedState(soci::session& session, SavedState const& state) +{ + session << "UPDATE DbState" + " SET WritableDb = :writableDb," + " ArchiveDb = :archiveDb," + " LastRotatedLedger = :lastRotated" + " WHERE Key = 1;", + soci::use(state.writableDb), soci::use(state.archiveDb), + soci::use(state.lastRotated); +} + +void +setLastRotated(soci::session& session, LedgerIndex seq) +{ + session << "UPDATE DbState SET LastRotatedLedger = :seq" + " WHERE Key = 1;", + soci::use(seq); +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp b/src/ripple/app/rdb/impl/UnitaryShard.cpp similarity index 68% rename from src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp rename to src/ripple/app/rdb/impl/UnitaryShard.cpp index 32dcfc25188..72441d0b75a 100644 --- a/src/ripple/app/rdb/impl/RelationalDBInterface_shards.cpp +++ b/src/ripple/app/rdb/impl/UnitaryShard.cpp @@ -1,7 +1,7 @@ //------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled - Copyright (c) 2020 Ripple Labs Inc. + Copyright (c) 2021 Ripple Labs Inc. Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above @@ -10,151 +10,20 @@ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ //============================================================================== -#include -#include -#include -#include -#include -#include +#include #include -#include -#include -#include -#include +#include #include namespace ripple { -DatabasePair -makeMetaDBs( - Config const& config, - DatabaseCon::Setup const& setup, - DatabaseCon::CheckpointerSetup const& checkpointerSetup) -{ - // ledger meta database - auto lgrMetaDB{std::make_unique( - setup, - LgrMetaDBName, - LgrMetaDBPragma, - LgrMetaDBInit, - checkpointerSetup)}; - - if (config.useTxTables()) - { - // transaction meta database - auto txMetaDB{std::make_unique( - setup, - TxMetaDBName, - TxMetaDBPragma, - TxMetaDBInit, - checkpointerSetup)}; - - return {std::move(lgrMetaDB), std::move(txMetaDB)}; - } - - return {std::move(lgrMetaDB), nullptr}; -} - -bool -saveLedgerMeta( - std::shared_ptr const& ledger, - Application& app, - soci::session& lgrMetaSession, - soci::session& txnMetaSession, - std::uint32_t const shardIndex) -{ - std::string_view constexpr lgrSQL = - R"sql(INSERT OR REPLACE INTO LedgerMeta VALUES - (:ledgerHash,:shardIndex);)sql"; - - auto const hash = to_string(ledger->info().hash); - lgrMetaSession << lgrSQL, soci::use(hash), soci::use(shardIndex); - - if (app.config().useTxTables()) - { - auto const aLedger = [&app, - ledger]() -> std::shared_ptr { - try - { - auto aLedger = - app.getAcceptedLedgerCache().fetch(ledger->info().hash); - if (!aLedger) - { - aLedger = std::make_shared(ledger, app); - app.getAcceptedLedgerCache().canonicalize_replace_client( - ledger->info().hash, aLedger); - } - - return aLedger; - } - catch (std::exception const&) - { - JLOG(app.journal("Ledger").warn()) - << "An accepted ledger was missing nodes"; - } - - return {}; - }(); - - if (!aLedger) - return false; - - soci::transaction tr(txnMetaSession); - - for (auto const& acceptedLedgerTx : *aLedger) - { - std::string_view constexpr txnSQL = - R"sql(INSERT OR REPLACE INTO TransactionMeta VALUES - (:transactionID,:shardIndex);)sql"; - - auto const transactionID = - to_string(acceptedLedgerTx->getTransactionID()); - - txnMetaSession << txnSQL, soci::use(transactionID), - soci::use(shardIndex); - } - - tr.commit(); - } - - return true; -} - -std::optional -getShardIndexforLedger(soci::session& session, LedgerHash const& hash) -{ - std::uint32_t shardIndex; - session << "SELECT ShardIndex FROM LedgerMeta WHERE LedgerHash = '" << hash - << "';", - soci::into(shardIndex); - - if (!session.got_data()) - return std::nullopt; - - return shardIndex; -} - -std::optional -getShardIndexforTransaction(soci::session& session, TxID const& id) -{ - std::uint32_t shardIndex; - session << "SELECT ShardIndex FROM TransactionMeta WHERE TransID = '" << id - << "';", - soci::into(shardIndex); - - if (!session.got_data()) - return std::nullopt; - - return shardIndex; -} - DatabasePair makeShardCompleteLedgerDBs( Config const& config, @@ -333,8 +202,6 @@ updateLedgerDBs( return true; } -/* Shard acquire db */ - std::unique_ptr makeAcquireDB( DatabaseCon::Setup const& setup, @@ -446,50 +313,4 @@ updateAcquireDB( } } -/* Archive DB */ - -std::unique_ptr 
-makeArchiveDB(boost::filesystem::path const& dir, std::string const& dbName) -{ - return std::make_unique( - dir, dbName, DownloaderDBPragma, ShardArchiveHandlerDBInit); -} - -void -readArchiveDB( - DatabaseCon& db, - std::function const& func) -{ - soci::rowset rs = - (db.getSession().prepare << "SELECT * FROM State;"); - - for (auto it = rs.begin(); it != rs.end(); ++it) - { - func(it->get(1), it->get(0)); - } -} - -void -insertArchiveDB( - DatabaseCon& db, - std::uint32_t shardIndex, - std::string const& url) -{ - db.getSession() << "INSERT INTO State VALUES (:index, :url);", - soci::use(shardIndex), soci::use(url); -} - -void -deleteFromArchiveDB(DatabaseCon& db, std::uint32_t shardIndex) -{ - db.getSession() << "DELETE FROM State WHERE ShardIndex = :index;", - soci::use(shardIndex); -} - -void -dropArchiveDB(DatabaseCon& db) -{ - db.getSession() << "DROP TABLE State;"; -} - } // namespace ripple diff --git a/src/ripple/app/rdb/impl/Vacuum.cpp b/src/ripple/app/rdb/impl/Vacuum.cpp new file mode 100644 index 00000000000..aad456cc5a8 --- /dev/null +++ b/src/ripple/app/rdb/impl/Vacuum.cpp @@ -0,0 +1,67 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace ripple { + +bool +doVacuumDB(DatabaseCon::Setup const& setup) +{ + boost::filesystem::path dbPath = setup.dataDir / TxDBName; + + uintmax_t const dbSize = file_size(dbPath); + assert(dbSize != static_cast(-1)); + + if (auto available = space(dbPath.parent_path()).available; + available < dbSize) + { + std::cerr << "The database filesystem must have at least as " + "much free space as the size of " + << dbPath.string() << ", which is " << dbSize + << " bytes. Only " << available << " bytes are available.\n"; + return false; + } + + auto txnDB = + std::make_unique(setup, TxDBName, TxDBPragma, TxDBInit); + auto& session = txnDB->getSession(); + std::uint32_t pageSize; + + // Only the most trivial databases will fit in memory on typical + // (recommended) hardware. Force temp files to be written to disk + // regardless of the config settings. + session << boost::format(CommonDBPragmaTemp) % "file"; + session << "PRAGMA page_size;", soci::into(pageSize); + + std::cout << "VACUUM beginning. page_size: " << pageSize << std::endl; + + session << "VACUUM;"; + assert(setup.globalPragma); + for (auto const& p : *setup.globalPragma) + session << p; + session << "PRAGMA page_size;", soci::into(pageSize); + + std::cout << "VACUUM finished. 
page_size: " << pageSize << std::endl; + + return true; +} + +} // namespace ripple diff --git a/src/ripple/app/rdb/impl/Wallet.cpp b/src/ripple/app/rdb/impl/Wallet.cpp new file mode 100644 index 00000000000..c6040964bde --- /dev/null +++ b/src/ripple/app/rdb/impl/Wallet.cpp @@ -0,0 +1,292 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2021 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#include +#include + +namespace ripple { + +std::unique_ptr +makeWalletDB(DatabaseCon::Setup const& setup) +{ + // wallet database + return std::make_unique( + setup, WalletDBName, std::array(), WalletDBInit); +} + +std::unique_ptr +makeTestWalletDB(DatabaseCon::Setup const& setup, std::string const& dbname) +{ + // wallet database + return std::make_unique( + setup, dbname.data(), std::array(), WalletDBInit); +} + +void +getManifests( + soci::session& session, + std::string const& dbTable, + ManifestCache& mCache, + beast::Journal j) +{ + // Load manifests stored in database + std::string const sql = "SELECT RawData FROM " + dbTable + ";"; + soci::blob sociRawData(session); + soci::statement st = (session.prepare << sql, soci::into(sociRawData)); + st.execute(); + while (st.fetch()) + { + std::string serialized; + convert(sociRawData, serialized); + if (auto mo = deserializeManifest(serialized)) + { + if (!mo->verify()) + { + JLOG(j.warn()) << "Unverifiable manifest in db"; + continue; + } + + mCache.applyManifest(std::move(*mo)); + } + else + { + JLOG(j.warn()) << "Malformed manifest in database"; + } + } +} + +static void +saveManifest( + soci::session& session, + std::string const& dbTable, + std::string const& serialized) +{ + // soci does not support bulk insertion of blob data + // Do not reuse blob because manifest ecdsa signatures vary in length + // but blob write length is expected to be >= the last write + soci::blob rawData(session); + convert(serialized, rawData); + session << "INSERT INTO " << dbTable << " (RawData) VALUES (:rawData);", + soci::use(rawData); +} + +void +saveManifests( + soci::session& session, + std::string const& dbTable, + std::function const& isTrusted, + hash_map const& map, + beast::Journal j) +{ + soci::transaction tr(session); + session << "DELETE FROM " << dbTable; + for (auto const& v : map) + { + // Save all revocation manifests, + // but only save trusted non-revocation manifests. 
+ if (!v.second.revoked() && !isTrusted(v.second.masterKey)) + { + JLOG(j.info()) << "Untrusted manifest in cache not saved to db"; + continue; + } + + saveManifest(session, dbTable, v.second.serialized); + } + tr.commit(); +} + +void +addValidatorManifest(soci::session& session, std::string const& serialized) +{ + soci::transaction tr(session); + saveManifest(session, "ValidatorManifests", serialized); + tr.commit(); +} + +std::pair +getNodeIdentity(soci::session& session) +{ + { + // SOCI requires boost::optional (not std::optional) as the parameter. + boost::optional pubKO, priKO; + soci::statement st = + (session.prepare + << "SELECT PublicKey, PrivateKey FROM NodeIdentity;", + soci::into(pubKO), + soci::into(priKO)); + st.execute(); + while (st.fetch()) + { + auto const sk = parseBase58( + TokenType::NodePrivate, priKO.value_or("")); + auto const pk = parseBase58( + TokenType::NodePublic, pubKO.value_or("")); + + // Only use if the public and secret keys are a pair + if (sk && pk && (*pk == derivePublicKey(KeyType::secp256k1, *sk))) + return {*pk, *sk}; + } + } + + // If a valid identity wasn't found, we randomly generate a new one: + auto [newpublicKey, newsecretKey] = randomKeyPair(KeyType::secp256k1); + + session << str( + boost::format("INSERT INTO NodeIdentity (PublicKey,PrivateKey) " + "VALUES ('%s','%s');") % + toBase58(TokenType::NodePublic, newpublicKey) % + toBase58(TokenType::NodePrivate, newsecretKey)); + + return {newpublicKey, newsecretKey}; +} + +std::unordered_set, KeyEqual> +getPeerReservationTable(soci::session& session, beast::Journal j) +{ + std::unordered_set, KeyEqual> table; + // These values must be boost::optionals (not std) because SOCI expects + // boost::optionals. + boost::optional valPubKey, valDesc; + // We should really abstract the table and column names into constants, + // but no one else does. Because it is too tedious? It would be easy if we + // had a jOOQ for C++. + soci::statement st = + (session.prepare + << "SELECT PublicKey, Description FROM PeerReservations;", + soci::into(valPubKey), + soci::into(valDesc)); + st.execute(); + while (st.fetch()) + { + if (!valPubKey || !valDesc) + { + // This represents a `NULL` in a `NOT NULL` column. It should be + // unreachable. + continue; + } + auto const optNodeId = + parseBase58(TokenType::NodePublic, *valPubKey); + if (!optNodeId) + { + JLOG(j.warn()) << "load: not a public key: " << valPubKey; + continue; + } + table.insert(PeerReservation{*optNodeId, *valDesc}); + } + + return table; +} + +void +insertPeerReservation( + soci::session& session, + PublicKey const& nodeId, + std::string const& description) +{ + session << "INSERT INTO PeerReservations (PublicKey, Description) " + "VALUES (:nodeId, :desc) " + "ON CONFLICT (PublicKey) DO UPDATE SET " + "Description=excluded.Description", + soci::use(toBase58(TokenType::NodePublic, nodeId)), + soci::use(description); +} + +void +deletePeerReservation(soci::session& session, PublicKey const& nodeId) +{ + session << "DELETE FROM PeerReservations WHERE PublicKey = :nodeId", + soci::use(toBase58(TokenType::NodePublic, nodeId)); +} + +bool +createFeatureVotes(soci::session& session) +{ + soci::transaction tr(session); + std::string sql = + "SELECT count(*) FROM sqlite_master " + "WHERE type='table' AND name='FeatureVotes'"; + // SOCI requires boost::optional (not std::optional) as the parameter. 
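+    // count(*) always yields exactly one row, so the optional below is
+    // safe to dereference once the query has run.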
+ boost::optional featureVotesCount; + session << sql, soci::into(featureVotesCount); + bool exists = static_cast(*featureVotesCount); + + // Create FeatureVotes table in WalletDB if it doesn't exist + if (!exists) + { + session << "CREATE TABLE FeatureVotes ( " + "AmendmentHash CHARACTER(64) NOT NULL, " + "AmendmentName TEXT, " + "Veto INTEGER NOT NULL );"; + tr.commit(); + } + return exists; +} + +void +readAmendments( + soci::session& session, + std::function amendment_hash, + boost::optional amendment_name, + boost::optional vote)> const& callback) +{ + // lambda that converts the internally stored int to an AmendmentVote. + auto intToVote = [](boost::optional const& dbVote) + -> boost::optional { + return safe_cast(dbVote.value_or(1)); + }; + + soci::transaction tr(session); + std::string sql = + "SELECT AmendmentHash, AmendmentName, Veto FROM FeatureVotes"; + // SOCI requires boost::optional (not std::optional) as parameters. + boost::optional amendment_hash; + boost::optional amendment_name; + boost::optional vote_to_veto; + soci::statement st = + (session.prepare << sql, + soci::into(amendment_hash), + soci::into(amendment_name), + soci::into(vote_to_veto)); + st.execute(); + while (st.fetch()) + { + callback(amendment_hash, amendment_name, intToVote(vote_to_veto)); + } +} + +void +voteAmendment( + soci::session& session, + uint256 const& amendment, + std::string const& name, + AmendmentVote vote) +{ + soci::transaction tr(session); + std::string sql = + "INSERT INTO FeatureVotes (AmendmentHash, AmendmentName, Veto) VALUES " + "('"; + sql += to_string(amendment); + sql += "', '" + name; + sql += "', '" + std::to_string(safe_cast(vote)) + "');"; + session << sql; + tr.commit(); +} + +} // namespace ripple diff --git a/src/ripple/app/reporting/ReportingETL.cpp b/src/ripple/app/reporting/ReportingETL.cpp index 15eda7b2747..7e15d242a72 100644 --- a/src/ripple/app/reporting/ReportingETL.cpp +++ b/src/ripple/app/reporting/ReportingETL.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include #include @@ -167,8 +167,7 @@ ReportingETL::loadInitialLedger(uint32_t startingSequence) if (app_.config().reporting()) { #ifdef RIPPLED_REPORTING - dynamic_cast( - &app_.getRelationalDBInterface()) + dynamic_cast(&app_.getRelationalDatabase()) ->writeLedgerAndTransactions(ledger->info(), accountTxData); #endif } @@ -595,69 +594,69 @@ ReportingETL::runETLPipeline(uint32_t startSequence) loadQueue.push({}); }}; - std::thread loader{ - [this, &lastPublishedSequence, &loadQueue, &writeConflict]() { - beast::setCurrentThreadName("rippled: ReportingETL load"); - size_t totalTransactions = 0; - double totalTime = 0; - while (!writeConflict) - { - std::optional, - std::vector>> - result{loadQueue.pop()}; - // if result is an empty optional, the transformer thread has - // stopped and the loader should stop as well - if (!result) - break; - if (isStopping()) - continue; - - auto& ledger = result->first; - auto& accountTxData = result->second; - - auto start = std::chrono::system_clock::now(); - // write to the key-value store - flushLedger(ledger); - - auto mid = std::chrono::system_clock::now(); + std::thread loader{[this, + &lastPublishedSequence, + &loadQueue, + &writeConflict]() { + beast::setCurrentThreadName("rippled: ReportingETL load"); + size_t totalTransactions = 0; + double totalTime = 0; + while (!writeConflict) + { + std::optional, + std::vector>> + result{loadQueue.pop()}; + // if result is an empty optional, the 
transformer thread has + // stopped and the loader should stop as well + if (!result) + break; + if (isStopping()) + continue; + + auto& ledger = result->first; + auto& accountTxData = result->second; + + auto start = std::chrono::system_clock::now(); + // write to the key-value store + flushLedger(ledger); + + auto mid = std::chrono::system_clock::now(); // write to RDBMS // if there is a write conflict, some other process has already // written this ledger and has taken over as the ETL writer #ifdef RIPPLED_REPORTING - if (!dynamic_cast( - &app_.getRelationalDBInterface()) - ->writeLedgerAndTransactions( - ledger->info(), accountTxData)) - writeConflict = true; + if (!dynamic_cast(&app_.getRelationalDatabase()) + ->writeLedgerAndTransactions( + ledger->info(), accountTxData)) + writeConflict = true; #endif - auto end = std::chrono::system_clock::now(); + auto end = std::chrono::system_clock::now(); - if (!writeConflict) - { - publishLedger(ledger); - lastPublishedSequence = ledger->info().seq; - } - // print some performance numbers - auto kvTime = ((mid - start).count()) / 1000000000.0; - auto relationalTime = ((end - mid).count()) / 1000000000.0; - - size_t numTxns = accountTxData.size(); - totalTime += kvTime; - totalTransactions += numTxns; - JLOG(journal_.info()) - << "Load phase of etl : " - << "Successfully published ledger! Ledger info: " - << detail::toString(ledger->info()) - << ". txn count = " << numTxns - << ". key-value write time = " << kvTime - << ". relational write time = " << relationalTime - << ". key-value tps = " << numTxns / kvTime - << ". relational tps = " << numTxns / relationalTime - << ". total key-value tps = " - << totalTransactions / totalTime; + if (!writeConflict) + { + publishLedger(ledger); + lastPublishedSequence = ledger->info().seq; } - }}; + // print some performance numbers + auto kvTime = ((mid - start).count()) / 1000000000.0; + auto relationalTime = ((end - mid).count()) / 1000000000.0; + + size_t numTxns = accountTxData.size(); + totalTime += kvTime; + totalTransactions += numTxns; + JLOG(journal_.info()) + << "Load phase of etl : " + << "Successfully published ledger! Ledger info: " + << detail::toString(ledger->info()) + << ". txn count = " << numTxns + << ". key-value write time = " << kvTime + << ". relational write time = " << relationalTime + << ". key-value tps = " << numTxns / kvTime + << ". relational tps = " << numTxns / relationalTime + << ". 
total key-value tps = " << totalTransactions / totalTime; + } + }}; // wait for all of the threads to stop loader.join(); diff --git a/src/ripple/app/reporting/ReportingETL.h b/src/ripple/app/reporting/ReportingETL.h index 540cc5bfd3d..71e08adf1f3 100644 --- a/src/ripple/app/reporting/ReportingETL.h +++ b/src/ripple/app/reporting/ReportingETL.h @@ -21,7 +21,7 @@ #define RIPPLE_APP_REPORTING_REPORTINGETL_H_INCLUDED #include -#include +#include #include #include #include @@ -50,7 +50,7 @@ #include namespace ripple { -using AccountTransactionsData = RelationalDBInterface::AccountTransactionsData; +using AccountTransactionsData = RelationalDatabase::AccountTransactionsData; /** * This class is responsible for continuously extracting data from a diff --git a/src/ripple/app/tx/impl/InvariantCheck.cpp b/src/ripple/app/tx/impl/InvariantCheck.cpp index 82f4cea6b3d..8664c6492b9 100644 --- a/src/ripple/app/tx/impl/InvariantCheck.cpp +++ b/src/ripple/app/tx/impl/InvariantCheck.cpp @@ -18,6 +18,8 @@ //============================================================================== #include + +#include #include #include #include @@ -493,23 +495,27 @@ ValidNewAccountRoot::finalize( void ValidNFTokenPage::visitEntry( - bool, + bool isDelete, std::shared_ptr const& before, std::shared_ptr const& after) { static constexpr uint256 const& pageBits = nft::pageMask; static constexpr uint256 const accountBits = ~pageBits; - auto check = [this](std::shared_ptr const& sle) { - auto const account = sle->key() & accountBits; - auto const limit = sle->key() & pageBits; + auto check = [this, isDelete](std::shared_ptr const& sle) { + uint256 const account = sle->key() & accountBits; + uint256 const hiLimit = sle->key() & pageBits; + std::optional const prev = (*sle)[~sfPreviousPageMin]; - if (auto const prev = (*sle)[~sfPreviousPageMin]) + // Make sure that any page links... + // 1. Are properly associated with the owning account and + // 2. The page is correctly ordered between links. + if (prev) { if (account != (*prev & accountBits)) badLink_ = true; - if (limit <= (*prev & pageBits)) + if (hiLimit <= (*prev & pageBits)) badLink_ = true; } @@ -518,17 +524,42 @@ ValidNFTokenPage::visitEntry( if (account != (*next & accountBits)) badLink_ = true; - if (limit >= (*next & pageBits)) + if (hiLimit >= (*next & pageBits)) badLink_ = true; } - for (auto const& obj : sle->getFieldArray(sfNFTokens)) { - if ((obj[sfNFTokenID] & pageBits) >= limit) - badEntry_ = true; - - if (auto uri = obj[~sfURI]; uri && uri->empty()) - badURI_ = true; + auto const& nftokens = sle->getFieldArray(sfNFTokens); + + // An NFTokenPage should never contain too many tokens or be empty. + if (std::size_t const nftokenCount = nftokens.size(); + (!isDelete && nftokenCount == 0) || + nftokenCount > dirMaxTokensPerPage) + invalidSize_ = true; + + // If prev is valid, use it to establish a lower bound for + // page entries. If prev is not valid the lower bound is zero. + uint256 const loLimit = + prev ? *prev & pageBits : uint256(beast::zero); + + // Also verify that all NFTokenIDs in the page are sorted. + uint256 loCmp = loLimit; + for (auto const& obj : nftokens) + { + uint256 const tokenID = obj[sfNFTokenID]; + if (!nft::compareTokens(loCmp, tokenID)) + badSort_ = true; + loCmp = tokenID; + + // None of the NFTs on this page should belong on lower or + // higher pages. 
+ if (uint256 const tokenPageBits = tokenID & pageBits; + tokenPageBits < loLimit || tokenPageBits >= hiLimit) + badEntry_ = true; + + if (auto uri = obj[~sfURI]; uri && uri->empty()) + badURI_ = true; + } } }; @@ -559,12 +590,24 @@ ValidNFTokenPage::finalize( return false; } + if (badSort_) + { + JLOG(j.fatal()) << "Invariant failed: NFTs on page are not sorted."; + return false; + } + if (badURI_) { JLOG(j.fatal()) << "Invariant failed: NFT contains empty URI."; return false; } + if (invalidSize_) + { + JLOG(j.fatal()) << "Invariant failed: NFT page has invalid size."; + return false; + } + return true; } diff --git a/src/ripple/app/tx/impl/InvariantCheck.h b/src/ripple/app/tx/impl/InvariantCheck.h index 5936b59b6a8..c3bb0216426 100644 --- a/src/ripple/app/tx/impl/InvariantCheck.h +++ b/src/ripple/app/tx/impl/InvariantCheck.h @@ -320,9 +320,11 @@ class ValidNewAccountRoot class ValidNFTokenPage { - bool badLink_ = false; bool badEntry_ = false; + bool badLink_ = false; + bool badSort_ = false; bool badURI_ = false; + bool invalidSize_ = false; public: void diff --git a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp index b7997996e40..7c78f175f63 100644 --- a/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp +++ b/src/ripple/app/tx/impl/NFTokenAcceptOffer.cpp @@ -63,36 +63,33 @@ NFTokenAcceptOffer::preflight(PreflightContext const& ctx) TER NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) { - auto const checkOffer = [&ctx](std::optional id) -> TER { + auto const checkOffer = [&ctx](std::optional id) + -> std::pair, TER> { if (id) { - auto const offer = ctx.view.read(keylet::nftoffer(*id)); + auto offerSLE = ctx.view.read(keylet::nftoffer(*id)); - if (!offer) - return tecOBJECT_NOT_FOUND; + if (!offerSLE) + return {nullptr, tecOBJECT_NOT_FOUND}; - if (hasExpired(ctx.view, (*offer)[~sfExpiration])) - return tecEXPIRED; - } + if (hasExpired(ctx.view, (*offerSLE)[~sfExpiration])) + return {nullptr, tecEXPIRED}; - return tesSUCCESS; + return {std::move(offerSLE), tesSUCCESS}; + } + return {nullptr, tesSUCCESS}; }; - auto const buy = ctx.tx[~sfNFTokenBuyOffer]; - auto const sell = ctx.tx[~sfNFTokenSellOffer]; - - if (auto const ret = checkOffer(buy); !isTesSuccess(ret)) - return ret; - - if (auto const ret = checkOffer(sell); !isTesSuccess(ret)) - return ret; + auto const [bo, err1] = checkOffer(ctx.tx[~sfNFTokenBuyOffer]); + if (!isTesSuccess(err1)) + return err1; + auto const [so, err2] = checkOffer(ctx.tx[~sfNFTokenSellOffer]); + if (!isTesSuccess(err2)) + return err2; - if (buy && sell) + if (bo && so) { // Brokered mode: - auto const bo = ctx.view.read(keylet::nftoffer(*buy)); - auto const so = ctx.view.read(keylet::nftoffer(*sell)); - // The two offers being brokered must be for the same token: if ((*bo)[sfNFTokenID] != (*so)[sfNFTokenID]) return tecNFTOKEN_BUY_SELL_MISMATCH; @@ -131,10 +128,8 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) } } - if (buy) + if (bo) { - auto const bo = ctx.view.read(keylet::nftoffer(*buy)); - if (((*bo)[sfFlags] & lsfSellNFToken) == lsfSellNFToken) return tecNFTOKEN_OFFER_TYPE_MISMATCH; @@ -143,7 +138,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) return tecCANT_ACCEPT_OWN_NFTOKEN_OFFER; // If not in bridged mode, the account must own the token: - if (!sell && + if (!so && !nft::findToken(ctx.view, ctx.tx[sfAccount], (*bo)[sfNFTokenID])) return tecNO_PERMISSION; @@ -160,10 +155,8 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) return tecINSUFFICIENT_FUNDS; } - if 
(sell) + if (so) { - auto const so = ctx.view.read(keylet::nftoffer(*sell)); - if (((*so)[sfFlags] & lsfSellNFToken) != lsfSellNFToken) return tecNFTOKEN_OFFER_TYPE_MISMATCH; @@ -176,7 +169,7 @@ NFTokenAcceptOffer::preclaim(PreclaimContext const& ctx) return tecNO_PERMISSION; // If not in bridged mode... - if (!buy) + if (!bo) { // If the offer has a Destination field, the acceptor must be the // Destination. diff --git a/src/ripple/app/tx/impl/NFTokenBurn.cpp b/src/ripple/app/tx/impl/NFTokenBurn.cpp index f1f5ae8a787..da23d78bdbd 100644 --- a/src/ripple/app/tx/impl/NFTokenBurn.cpp +++ b/src/ripple/app/tx/impl/NFTokenBurn.cpp @@ -77,27 +77,9 @@ NFTokenBurn::preclaim(PreclaimContext const& ctx) } } - auto const id = ctx.tx[sfNFTokenID]; - - std::size_t totalOffers = 0; - - { - Dir buys(ctx.view, keylet::nft_buys(id)); - totalOffers += std::distance(buys.begin(), buys.end()); - } - - if (totalOffers > maxDeletableTokenOfferEntries) - return tefTOO_BIG; - - { - Dir sells(ctx.view, keylet::nft_sells(id)); - totalOffers += std::distance(sells.begin(), sells.end()); - } - - if (totalOffers > maxDeletableTokenOfferEntries) - return tefTOO_BIG; - - return tesSUCCESS; + // If there are too many offers, then burning the token would produce too + // much metadata. Disallow burning a token with too many offers. + return nft::notTooManyOffers(ctx.view, ctx.tx[sfNFTokenID]); } TER diff --git a/src/ripple/app/tx/impl/SetSignerList.cpp b/src/ripple/app/tx/impl/SetSignerList.cpp index 78409ba7145..07cc705bad1 100644 --- a/src/ripple/app/tx/impl/SetSignerList.cpp +++ b/src/ripple/app/tx/impl/SetSignerList.cpp @@ -82,6 +82,7 @@ SetSignerList::preflight(PreflightContext const& ctx) return ret; auto const result = determineOperation(ctx.tx, ctx.flags, ctx.j); + if (std::get<0>(result) != tesSUCCESS) return std::get<0>(result); @@ -98,7 +99,11 @@ SetSignerList::preflight(PreflightContext const& ctx) // Validate our settings. auto const account = ctx.tx.getAccountID(sfAccount); NotTEC const ter = validateQuorumAndSignerEntries( - std::get<1>(result), std::get<2>(result), account, ctx.j); + std::get<1>(result), + std::get<2>(result), + account, + ctx.j, + ctx.rules); if (ter != tesSUCCESS) { return ter; @@ -149,7 +154,7 @@ SetSignerList::preCompute() // is valid until the featureMultiSignReserve amendment passes. Once it // passes then just 1 OwnerCount is associated with a SignerList. static int -signerCountBasedOwnerCountDelta(std::size_t entryCount) +signerCountBasedOwnerCountDelta(std::size_t entryCount, Rules const& rules) { // We always compute the full change in OwnerCount, taking into account: // o The fact that we're adding/removing a SignerList and @@ -164,9 +169,10 @@ signerCountBasedOwnerCountDelta(std::size_t entryCount) // units. A SignerList with 8 entries would cost 10 OwnerCount units. // // The static_cast should always be safe since entryCount should always - // be in the range from 1 to 8. We've got a lot of room to grow. + // be in the range from 1 to 8 (or 32 if ExpandedSignerList is enabled). + // We've got a lot of room to grow. 
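+    // (So the delta computed below is simply 2 + entryCount.)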
assert(entryCount >= STTx::minMultiSigners); - assert(entryCount <= STTx::maxMultiSigners); + assert(entryCount <= STTx::maxMultiSigners(&rules)); return 2 + static_cast(entryCount); } @@ -195,7 +201,8 @@ removeSignersFromLedger( { STArray const& actualList = signers->getFieldArray(sfSignerEntries); removeFromOwnerCount = - signerCountBasedOwnerCountDelta(actualList.size()) * -1; + signerCountBasedOwnerCountDelta(actualList.size(), view.rules()) * + -1; } // Remove the node from the account directory. @@ -238,13 +245,14 @@ SetSignerList::validateQuorumAndSignerEntries( std::uint32_t quorum, std::vector const& signers, AccountID const& account, - beast::Journal j) + beast::Journal j, + Rules const& rules) { // Reject if there are too many or too few entries in the list. { std::size_t const signerCount = signers.size(); if ((signerCount < STTx::minMultiSigners) || - (signerCount > STTx::maxMultiSigners)) + (signerCount > STTx::maxMultiSigners(&rules))) { JLOG(j.trace()) << "Too many or too few signers in signer list."; return temMALFORMED; @@ -259,6 +267,9 @@ SetSignerList::validateQuorumAndSignerEntries( return temBAD_SIGNER; } + // Is the ExpandedSignerList amendment active? + bool const expandedSignerList = rules.enabled(featureExpandedSignerList); + // Make sure no signers reference this account. Also make sure the // quorum can be reached. std::uint64_t allSignersWeight(0); @@ -279,6 +290,14 @@ SetSignerList::validateQuorumAndSignerEntries( return temBAD_SIGNER; } + if (signer.tag && !expandedSignerList) + { + JLOG(j.trace()) << "Malformed transaction: sfWalletLocator " + "specified in SignerEntry " + << "but featureExpandedSignerList is not enabled."; + return temMALFORMED; + } + // Don't verify that the signer accounts exist. Non-existent accounts // may be phantom accounts (which are permitted). } @@ -321,7 +340,8 @@ SetSignerList::replaceSignerList() std::uint32_t flags{lsfOneOwnerCount}; if (!ctx_.view().rules().enabled(featureMultiSignReserve)) { - addedOwnerCount = signerCountBasedOwnerCountDelta(signers_.size()); + addedOwnerCount = signerCountBasedOwnerCountDelta( + signers_.size(), ctx_.view().rules()); flags = 0; } @@ -389,6 +409,9 @@ SetSignerList::writeSignersToSLE( if (flags) // Only set flags if they are non-default (default is zero). ledgerEntry->setFieldU32(sfFlags, flags); + bool const expandedSignerList = + ctx_.view().rules().enabled(featureExpandedSignerList); + // Create the SignerListArray one SignerEntry at a time. STArray toLedger(signers_.size()); for (auto const& entry : signers_) @@ -398,6 +421,11 @@ SetSignerList::writeSignersToSLE( obj.reserve(2); obj.setAccountID(sfAccount, entry.account); obj.setFieldU16(sfSignerWeight, entry.weight); + + // This is a defensive check to make absolutely sure we will never write + // a tag into the ledger while featureExpandedSignerList is not enabled + if (expandedSignerList && entry.tag) + obj.setFieldH256(sfWalletLocator, *(entry.tag)); } // Assign the SignerEntries. 
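
Taken together, the SetSignerList changes above make both the maximum signer count and the new sfWalletLocator field depend on whether the ExpandedSignerList amendment is enabled (8 entries without it, 32 with it, per the comments in the hunk). The following is an illustrative sketch of that rule, not code from the patch; signerListShapeIsLegal is a hypothetical helper, and it only uses identifiers that the patch itself introduces or relies on (STTx::minMultiSigners, STTx::maxMultiSigners(&rules), featureExpandedSignerList, SignerEntries::SignerEntry::tag):

    // Sketch only: mirrors the validation performed in
    // SetSignerList::validateQuorumAndSignerEntries under ExpandedSignerList.
    #include <ripple/app/tx/impl/SignerEntries.h>
    #include <ripple/protocol/Feature.h>
    #include <ripple/protocol/Rules.h>
    #include <ripple/protocol/STTx.h>

    #include <vector>

    namespace ripple {

    // Returns true if the proposed signer list is acceptable in size and only
    // carries sfWalletLocator (SignerEntry::tag) when the amendment allows it.
    bool
    signerListShapeIsLegal(
        Rules const& rules,
        std::vector<SignerEntries::SignerEntry> const& signers)
    {
        if (signers.size() < STTx::minMultiSigners ||
            signers.size() > STTx::maxMultiSigners(&rules))
            return false;

        bool const expanded = rules.enabled(featureExpandedSignerList);
        for (auto const& entry : signers)
            if (entry.tag && !expanded)
                return false;  // WalletLocator requires ExpandedSignerList

        return true;
    }

    }  // namespace ripple
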
diff --git a/src/ripple/app/tx/impl/SetSignerList.h b/src/ripple/app/tx/impl/SetSignerList.h index 345d5940232..f8e49e4a7b0 100644 --- a/src/ripple/app/tx/impl/SetSignerList.h +++ b/src/ripple/app/tx/impl/SetSignerList.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -83,7 +84,8 @@ class SetSignerList : public Transactor std::uint32_t quorum, std::vector const& signers, AccountID const& account, - beast::Journal j); + beast::Journal j, + Rules const&); TER replaceSignerList(); diff --git a/src/ripple/app/tx/impl/SignerEntries.cpp b/src/ripple/app/tx/impl/SignerEntries.cpp index 5081dc3e2f2..a948b2f902b 100644 --- a/src/ripple/app/tx/impl/SignerEntries.cpp +++ b/src/ripple/app/tx/impl/SignerEntries.cpp @@ -22,6 +22,7 @@ #include #include #include +#include namespace ripple { @@ -41,7 +42,7 @@ SignerEntries::deserialize( } std::vector accountVec; - accountVec.reserve(STTx::maxMultiSigners); + accountVec.reserve(STTx::maxMultiSigners()); STArray const& sEntries(obj.getFieldArray(sfSignerEntries)); for (STObject const& sEntry : sEntries) @@ -57,7 +58,9 @@ SignerEntries::deserialize( // Extract SignerEntry fields. AccountID const account = sEntry.getAccountID(sfAccount); std::uint16_t const weight = sEntry.getFieldU16(sfSignerWeight); - accountVec.emplace_back(account, weight); + std::optional const tag = sEntry.at(~sfWalletLocator); + + accountVec.emplace_back(account, weight, tag); } return accountVec; } diff --git a/src/ripple/app/tx/impl/SignerEntries.h b/src/ripple/app/tx/impl/SignerEntries.h index 96b5e29d9c8..cf4921ecf4b 100644 --- a/src/ripple/app/tx/impl/SignerEntries.h +++ b/src/ripple/app/tx/impl/SignerEntries.h @@ -23,9 +23,11 @@ #include // NotTEC #include // #include // beast::Journal +#include // Rules #include // STTx::maxMultiSigners #include // temMALFORMED #include // AccountID +#include namespace ripple { @@ -42,9 +44,13 @@ class SignerEntries { AccountID account; std::uint16_t weight; + std::optional tag; - SignerEntry(AccountID const& inAccount, std::uint16_t inWeight) - : account(inAccount), weight(inWeight) + SignerEntry( + AccountID const& inAccount, + std::uint16_t inWeight, + std::optional inTag) + : account(inAccount), weight(inWeight), tag(inTag) { } diff --git a/src/ripple/app/tx/impl/apply.cpp b/src/ripple/app/tx/impl/apply.cpp index 332364863c1..cc1e792c014 100644 --- a/src/ripple/app/tx/impl/apply.cpp +++ b/src/ripple/app/tx/impl/apply.cpp @@ -54,7 +54,7 @@ checkValidity( ? STTx::RequireFullyCanonicalSig::yes : STTx::RequireFullyCanonicalSig::no; - auto const sigVerify = tx.checkSign(requireCanonicalSig); + auto const sigVerify = tx.checkSign(requireCanonicalSig, rules); if (!sigVerify) { router.setFlags(id, SF_SIGBAD); diff --git a/src/ripple/app/tx/impl/details/NFTokenUtils.cpp b/src/ripple/app/tx/impl/details/NFTokenUtils.cpp index f99c6cf6b17..d1214a98ee8 100644 --- a/src/ripple/app/tx/impl/details/NFTokenUtils.cpp +++ b/src/ripple/app/tx/impl/details/NFTokenUtils.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -131,15 +132,40 @@ getPageForToken( cmp; }); - // If splitIter == begin(), then the entire page is filled with - // equivalent tokens. We cannot split the page, so we cannot - // insert the requested token. - // // There should be no circumstance when splitIter == end(), but if it // were to happen we should bail out because something is confused. 
- if (splitIter == narr.begin() || splitIter == narr.end()) + if (splitIter == narr.end()) return nullptr; + // If splitIter == begin(), then the entire page is filled with + // equivalent tokens. This requires special handling. + if (splitIter == narr.begin()) + { + // Prior to fixNFTokenDirV1 we simply stopped. + if (!view.rules().enabled(fixNFTokenDirV1)) + return nullptr; + else + { + // This would be an ideal place for the spaceship operator... + int const relation = compare(id & nft::pageMask, cmp); + if (relation == 0) + // If the passed in id belongs exactly on this (full) page + // this account simply cannot store the NFT. + return nullptr; + + else if (relation > 0) + // We need to leave the entire contents of this page in + // narr so carr stays empty. The new NFT will be + // inserted in carr. This keeps the NFTs that must be + // together all on their own page. + splitIter = narr.end(); + + // If neither of those conditions apply then put all of + // narr into carr and produce an empty narr where the new NFT + // will be inserted. Leave the split at narr.begin(). + } + } + // Split narr at splitIter. STArray newCarr( std::make_move_iterator(splitIter), @@ -148,8 +174,20 @@ getPageForToken( std::swap(carr, newCarr); } - auto np = std::make_shared( - keylet::nftpage(base, carr[0].getFieldH256(sfNFTokenID))); + // Determine the ID for the page index. This decision is conditional on + // fixNFTokenDirV1 being enabled. But the condition for the decision + // is not possible unless fixNFTokenDirV1 is enabled. + // + // Note that we use uint256::next() because there's a subtlety in the way + // NFT pages are structured. The low 96-bits of NFT ID must be strictly + // less than the low 96-bits of the enclosing page's index. In order to + // accommodate that requirement we use an index one higher than the + // largest NFT in the page. + uint256 const tokenIDForNewPage = narr.size() == dirMaxTokensPerPage + ? narr[dirMaxTokensPerPage - 1].getFieldH256(sfNFTokenID).next() + : carr[0].getFieldH256(sfNFTokenID); + + auto np = std::make_shared(keylet::nftpage(base, tokenIDForNewPage)); np->setFieldArray(sfNFTokens, narr); np->setFieldH256(sfNextPageMin, cp->key()); @@ -172,10 +210,17 @@ getPageForToken( createCallback(view, owner); - return (first.key <= np->key()) ? np : cp; + // fixNFTokenDirV1 corrects a bug in the initial implementation that + // would put an NFT in the wrong page. The problem was caused by an + // off-by-one subtlety that the NFT can only be stored in the first page + // with a key that's strictly greater than `first` + if (!view.rules().enabled(fixNFTokenDirV1)) + return (first.key <= np->key()) ? np : cp; + + return (first.key < np->key()) ? 
np : cp; } -static bool +bool compareTokens(uint256 const& a, uint256 const& b) { // The sort of NFTokens needs to be fully deterministic, but the sort @@ -505,6 +550,33 @@ removeAllTokenOffers(ApplyView& view, Keylet const& directory) }); } +TER +notTooManyOffers(ReadView const& view, uint256 const& nftokenID) +{ + std::size_t totalOffers = 0; + + { + Dir buys(view, keylet::nft_buys(nftokenID)); + for (auto iter = buys.begin(); iter != buys.end(); iter.next_page()) + { + totalOffers += iter.page_size(); + if (totalOffers > maxDeletableTokenOfferEntries) + return tefTOO_BIG; + } + } + + { + Dir sells(view, keylet::nft_sells(nftokenID)); + for (auto iter = sells.begin(); iter != sells.end(); iter.next_page()) + { + totalOffers += iter.page_size(); + if (totalOffers > maxDeletableTokenOfferEntries) + return tefTOO_BIG; + } + } + return tesSUCCESS; +} + bool deleteTokenOffer(ApplyView& view, std::shared_ptr const& offer) { diff --git a/src/ripple/app/tx/impl/details/NFTokenUtils.h b/src/ripple/app/tx/impl/details/NFTokenUtils.h index aac5dbf5fa7..fa8c43b5877 100644 --- a/src/ripple/app/tx/impl/details/NFTokenUtils.h +++ b/src/ripple/app/tx/impl/details/NFTokenUtils.h @@ -53,15 +53,14 @@ constexpr std::uint16_t const flagOnlyXRP = 0x0002; constexpr std::uint16_t const flagCreateTrustLines = 0x0004; constexpr std::uint16_t const flagTransferable = 0x0008; -/** Erases the specified offer from the specified token offer directory. - - */ -void -removeTokenOffer(ApplyView& view, uint256 const& id); - +/** Deletes all offers from the specified token offer directory. */ void removeAllTokenOffers(ApplyView& view, Keylet const& directory); +/** Returns tesSUCCESS if NFToken has few enough offers that it can be burned */ +TER +notTooManyOffers(ReadView const& view, uint256 const& nftokenID); + /** Finds the specified token in the owner's token directory. */ std::optional findToken( @@ -179,6 +178,9 @@ getIssuer(uint256 const& id) return AccountID::fromVoid(id.data() + 4); } +bool +compareTokens(uint256 const& a, uint256 const& b); + } // namespace nft } // namespace ripple diff --git a/src/ripple/beast/utility/rngfill.h b/src/ripple/beast/utility/rngfill.h index d906a66a0b3..cf8b7bbeb4b 100644 --- a/src/ripple/beast/utility/rngfill.h +++ b/src/ripple/beast/utility/rngfill.h @@ -32,6 +32,7 @@ void rngfill(void* buffer, std::size_t bytes, Generator& g) { using result_type = typename Generator::result_type; + while (bytes >= sizeof(result_type)) { auto const v = g(); @@ -39,15 +40,22 @@ rngfill(void* buffer, std::size_t bytes, Generator& g) buffer = reinterpret_cast(buffer) + sizeof(v); bytes -= sizeof(v); } + + assert(bytes < sizeof(result_type)); + #ifdef __GNUC__ // gcc 11.1 (falsely) warns about an array-bounds overflow in release mode. 
#pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" +#endif + if (bytes > 0) { auto const v = g(); std::memcpy(buffer, &v, bytes); } + +#ifdef __GNUC__ #pragma GCC diagnostic pop #endif } diff --git a/src/ripple/ledger/Directory.h b/src/ripple/ledger/Directory.h index c24d348e7e8..0efcf43e773 100644 --- a/src/ripple/ledger/Directory.h +++ b/src/ripple/ledger/Directory.h @@ -79,6 +79,12 @@ class Dir::const_iterator const_iterator operator++(int); + const_iterator& + next_page(); + + std::size_t + page_size(); + Keylet const& page() const { diff --git a/src/ripple/ledger/ReadView.h b/src/ripple/ledger/ReadView.h index bc9c0906a45..714f8dc945d 100644 --- a/src/ripple/ledger/ReadView.h +++ b/src/ripple/ledger/ReadView.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -125,64 +126,6 @@ struct LedgerInfo //------------------------------------------------------------------------------ -class DigestAwareReadView; - -/** Rules controlling protocol behavior. */ -class Rules -{ -private: - class Impl; - - std::shared_ptr impl_; - -public: - Rules(Rules const&) = default; - Rules& - operator=(Rules const&) = default; - - Rules() = delete; - - /** Construct an empty rule set. - - These are the rules reflected by - the genesis ledger. - */ - explicit Rules(std::unordered_set> const& presets); - - /** Construct rules from a ledger. - - The ledger contents are analyzed for rules - and amendments and extracted to the object. - */ - explicit Rules( - DigestAwareReadView const& ledger, - std::unordered_set> const& presets); - - /** Returns `true` if a feature is enabled. */ - bool - enabled(uint256 const& id) const; - - /** Returns `true` if these rules don't match the ledger. */ - bool - changed(DigestAwareReadView const& ledger) const; - - /** Returns `true` if two rule sets are identical. - - @note This is for diagnostics. To determine if new - rules should be constructed, call changed() first instead. - */ - bool - operator==(Rules const&) const; - - bool - operator!=(Rules const& other) const - { - return !(*this == other); - } -}; - -//------------------------------------------------------------------------------ - /** A view into a ledger. This interface provides read access to state @@ -423,6 +366,11 @@ getCloseAgree(LedgerInfo const& info) void addRaw(LedgerInfo const&, Serializer&, bool includeHash = false); +Rules +makeRulesGivenLedger( + DigestAwareReadView const& ledger, + std::unordered_set> const& presets); + } // namespace ripple #include diff --git a/src/ripple/ledger/impl/Directory.cpp b/src/ripple/ledger/impl/Directory.cpp index de7ea0d2d88..759b4d71b74 100644 --- a/src/ripple/ledger/impl/Directory.cpp +++ b/src/ripple/ledger/impl/Directory.cpp @@ -1,5 +1,4 @@ -//------------ -//------------------------------------------------------------------ +//------------------------------------------------------------------------------ /* This file is part of rippled: https://github.com/ripple/rippled Copyright (c) 2012, 2015 Ripple Labs Inc. 
@@ -81,44 +80,55 @@ const_iterator::operator++() if (++it_ != std::end(*indexes_)) { index_ = *it_; + cache_ = std::nullopt; + return *this; + } + + return next_page(); +} + +const_iterator +const_iterator::operator++(int) +{ + assert(index_ != beast::zero); + const_iterator tmp(*this); + ++(*this); + return tmp; +} + +const_iterator& +const_iterator::next_page() +{ + auto const next = sle_->getFieldU64(sfIndexNext); + if (next == 0) + { + page_.key = root_.key; + index_ = beast::zero; } else { - auto const next = sle_->getFieldU64(sfIndexNext); - if (next == 0) + page_ = keylet::page(root_, next); + sle_ = view_->read(page_); + assert(sle_); + indexes_ = &sle_->getFieldV256(sfIndexes); + if (indexes_->empty()) { - page_.key = root_.key; index_ = beast::zero; } else { - page_ = keylet::page(root_, next); - sle_ = view_->read(page_); - assert(sle_); - indexes_ = &sle_->getFieldV256(sfIndexes); - if (indexes_->empty()) - { - index_ = beast::zero; - } - else - { - it_ = std::begin(*indexes_); - index_ = *it_; - } + it_ = std::begin(*indexes_); + index_ = *it_; } } - cache_ = std::nullopt; return *this; } -const_iterator -const_iterator::operator++(int) +std::size_t +const_iterator::page_size() { - assert(index_ != beast::zero); - const_iterator tmp(*this); - ++(*this); - return tmp; + return indexes_->size(); } } // namespace ripple diff --git a/src/ripple/ledger/impl/ReadView.cpp b/src/ripple/ledger/impl/ReadView.cpp index 77db253aa16..57af008b47c 100644 --- a/src/ripple/ledger/impl/ReadView.cpp +++ b/src/ripple/ledger/impl/ReadView.cpp @@ -21,108 +21,6 @@ namespace ripple { -class Rules::Impl -{ -private: - std::unordered_set> set_; - std::optional digest_; - std::unordered_set> const& presets_; - -public: - explicit Impl(std::unordered_set> const& presets) - : presets_(presets) - { - } - - explicit Impl( - DigestAwareReadView const& ledger, - std::unordered_set> const& presets) - : presets_(presets) - { - auto const k = keylet::amendments(); - digest_ = ledger.digest(k.key); - if (!digest_) - return; - auto const sle = ledger.read(k); - if (!sle) - { - // LogicError() ? 
- return; - } - - for (auto const& item : sle->getFieldV256(sfAmendments)) - set_.insert(item); - } - - bool - enabled(uint256 const& feature) const - { - if (presets_.count(feature) > 0) - return true; - return set_.count(feature) > 0; - } - - bool - changed(DigestAwareReadView const& ledger) const - { - auto const digest = ledger.digest(keylet::amendments().key); - if (!digest && !digest_) - return false; - if (!digest || !digest_) - return true; - return *digest != *digest_; - } - - bool - operator==(Impl const& other) const - { - if (!digest_ && !other.digest_) - return true; - if (!digest_ || !other.digest_) - return false; - return *digest_ == *other.digest_; - } -}; - -//------------------------------------------------------------------------------ - -Rules::Rules( - DigestAwareReadView const& ledger, - std::unordered_set> const& presets) - : impl_(std::make_shared(ledger, presets)) -{ -} - -Rules::Rules(std::unordered_set> const& presets) - : impl_(std::make_shared(presets)) -{ -} - -bool -Rules::enabled(uint256 const& id) const -{ - assert(impl_); - return impl_->enabled(id); -} - -bool -Rules::changed(DigestAwareReadView const& ledger) const -{ - assert(impl_); - return impl_->changed(ledger); -} - -bool -Rules::operator==(Rules const& other) const -{ - assert(impl_ && other.impl_); - if (impl_.get() == other.impl_.get()) - return true; - return *impl_ == *other.impl_; -} - -//------------------------------------------------------------------------------ - ReadView::sles_type::sles_type(ReadView const& view) : ReadViewFwdRange(view) { } @@ -167,4 +65,20 @@ ReadView::txs_type::end() const -> iterator return iterator(view_, view_->txsEnd()); } +Rules +makeRulesGivenLedger( + DigestAwareReadView const& ledger, + std::unordered_set> const& presets) +{ + Keylet const k = keylet::amendments(); + std::optional digest = ledger.digest(k.key); + if (digest) + { + auto const sle = ledger.read(k); + if (sle) + return Rules(presets, digest, sle->getFieldV256(sfAmendments)); + } + return Rules(presets); +} + } // namespace ripple diff --git a/src/ripple/net/HTTPDownloader.h b/src/ripple/net/HTTPDownloader.h index 1f2243a4fe4..39b9a904aa3 100644 --- a/src/ripple/net/HTTPDownloader.h +++ b/src/ripple/net/HTTPDownloader.h @@ -61,6 +61,12 @@ class HTTPDownloader : public std::enable_shared_from_this virtual ~HTTPDownloader() = default; + bool + sessionIsActive() const; + + bool + isStopping() const; + protected: // must be accessed through a shared_ptr // use make_XXX functions to create @@ -88,7 +94,7 @@ class HTTPDownloader : public std::enable_shared_from_this std::atomic stop_; // Used to protect sessionActive_ - std::mutex m_; + mutable std::mutex m_; bool sessionActive_; std::condition_variable c_; diff --git a/src/ripple/net/impl/DatabaseBody.ipp b/src/ripple/net/impl/DatabaseBody.ipp index 061a630255c..cdc7da2bc41 100644 --- a/src/ripple/net/impl/DatabaseBody.ipp +++ b/src/ripple/net/impl/DatabaseBody.ipp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include namespace ripple { diff --git a/src/ripple/net/impl/HTTPDownloader.cpp b/src/ripple/net/impl/HTTPDownloader.cpp index 5ed2ceae0db..44d27466224 100644 --- a/src/ripple/net/impl/HTTPDownloader.cpp +++ b/src/ripple/net/impl/HTTPDownloader.cpp @@ -293,6 +293,20 @@ HTTPDownloader::stop() } } +bool +HTTPDownloader::sessionIsActive() const +{ + std::lock_guard lock(m_); + return sessionActive_; +} + +bool +HTTPDownloader::isStopping() const +{ + std::lock_guard lock(m_); + 
return stop_; +} + void HTTPDownloader::fail( boost::filesystem::path dstPath, diff --git a/src/ripple/nodestore/Database.h b/src/ripple/nodestore/Database.h index bb9304507d9..0f9e95b23e1 100644 --- a/src/ripple/nodestore/Database.h +++ b/src/ripple/nodestore/Database.h @@ -324,6 +324,11 @@ class Database // The earliest shard index std::uint32_t const earliestShardIndex_; + // The maximum number of requests a thread extracts from the queue in an + // attempt to minimize the overhead of mutex acquisition. This is an + // advanced tunable, via the config file. The default value is 4. + int const requestBundle_; + void storeStats(std::uint64_t count, std::uint64_t sz) { @@ -368,6 +373,7 @@ class Database std::atomic readStopping_ = false; std::atomic readThreads_ = 0; + std::atomic runningThreads_ = 0; virtual std::shared_ptr fetchNodeObject( diff --git a/src/ripple/nodestore/impl/Database.cpp b/src/ripple/nodestore/impl/Database.cpp index bf28f5bfbfb..15aad0a02a3 100644 --- a/src/ripple/nodestore/impl/Database.cpp +++ b/src/ripple/nodestore/impl/Database.cpp @@ -43,7 +43,8 @@ Database::Database( , earliestLedgerSeq_( get(config, "earliest_seq", XRP_LEDGER_EARLIEST_SEQ)) , earliestShardIndex_((earliestLedgerSeq_ - 1) / ledgersPerShard_) - , readThreads_(std::min(1, readThreads)) + , requestBundle_(get(config, "rq_bundle", 4)) + , readThreads_(std::max(1, readThreads)) { assert(readThreads != 0); @@ -53,10 +54,15 @@ Database::Database( if (earliestLedgerSeq_ < 1) Throw("Invalid earliest_seq"); - for (int i = 0; i != readThreads_.load(); ++i) + if (requestBundle_ < 1 || requestBundle_ > 64) + Throw("Invalid rq_bundle"); + + for (int i = readThreads_.load(); i != 0; --i) { std::thread t( [this](int i) { + runningThreads_++; + beast::setCurrentThreadName( "db prefetch #" + std::to_string(i)); @@ -68,14 +74,20 @@ Database::Database( std::unique_lock lock(readLock_); if (read_.empty()) + { + runningThreads_--; readCondVar_.wait(lock); + runningThreads_++; + } if (isStopping()) continue; - // We extract up to 64 objects to minimize the overhead - // of acquiring the mutex. - for (int cnt = 0; !read_.empty() && cnt != 64; ++cnt) + // If configured, extract multiple object at a time to + // minimize the overhead of acquiring the mutex. 
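+                    // (requestBundle_ comes from the optional "rq_bundle"
+                    // config key; it defaults to 4 and is validated above to
+                    // lie in [1, 64].)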
+ for (int cnt = 0; + !read_.empty() && cnt != requestBundle_; + ++cnt) read.insert(read_.extract(read_.begin())); } @@ -84,7 +96,7 @@ Database::Database( assert(!it->second.empty()); auto const& hash = it->first; - auto const& data = std::move(it->second); + auto const& data = it->second; auto const seqn = data[0].first; auto obj = @@ -340,6 +352,16 @@ void Database::getCountsJson(Json::Value& obj) { assert(obj.isObject()); + + { + std::unique_lock lock(readLock_); + obj["read_queue"] = static_cast(read_.size()); + } + + obj["read_threads_total"] = readThreads_.load(); + obj["read_threads_running"] = runningThreads_.load(); + obj["read_request_bundle"] = requestBundle_; + obj[jss::node_writes] = std::to_string(storeCount_); obj[jss::node_reads_total] = std::to_string(fetchTotalCount_); obj[jss::node_reads_hit] = std::to_string(fetchHitCount_); diff --git a/src/ripple/nodestore/impl/DatabaseShardImp.cpp b/src/ripple/nodestore/impl/DatabaseShardImp.cpp index 32efaecdb73..1efcbe2ac26 100644 --- a/src/ripple/nodestore/impl/DatabaseShardImp.cpp +++ b/src/ripple/nodestore/impl/DatabaseShardImp.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -799,14 +799,12 @@ DatabaseShardImp::doImportDatabase() std::optional info; if (sortOrder == std::string("asc")) { - info = dynamic_cast( - &app_.getRelationalDBInterface()) + info = dynamic_cast(&app_.getRelationalDatabase()) ->getLimitedOldestLedgerInfo(earliestLedgerSeq()); } else { - info = dynamic_cast( - &app_.getRelationalDBInterface()) + info = dynamic_cast(&app_.getRelationalDatabase()) ->getLimitedNewestLedgerInfo(earliestLedgerSeq()); } if (info) @@ -925,7 +923,7 @@ DatabaseShardImp::doImportDatabase() // Verify SQLite ledgers are in the node store { auto const ledgerHashes{ - app_.getRelationalDBInterface().getHashesByIndex( + app_.getRelationalDatabase().getHashesByIndex( firstSeq, lastSeq)}; if (ledgerHashes.size() != maxLedgers(shardIndex)) continue; @@ -2026,6 +2024,13 @@ DatabaseShardImp::callForLedgerSQLByLedgerSeq( LedgerIndex ledgerSeq, std::function const& callback) { + if (ledgerSeq < earliestLedgerSeq_) + { + JLOG(j_.warn()) << "callForLedgerSQLByLedgerSeq ledger seq too early: " + << ledgerSeq; + return false; + } + return callForLedgerSQLByShardIndex(seqToShardIndex(ledgerSeq), callback); } diff --git a/src/ripple/nodestore/impl/Shard.cpp b/src/ripple/nodestore/impl/Shard.cpp index dc5033c969b..911eedef6b5 100644 --- a/src/ripple/nodestore/impl/Shard.cpp +++ b/src/ripple/nodestore/impl/Shard.cpp @@ -19,8 +19,7 @@ #include #include -#include -#include +#include #include #include #include @@ -28,9 +27,6 @@ #include #include -#include -#include - namespace ripple { namespace NodeStore { diff --git a/src/ripple/nodestore/impl/Shard.h b/src/ripple/nodestore/impl/Shard.h index 17001a6b829..b7516e5f1e6 100644 --- a/src/ripple/nodestore/impl/Shard.h +++ b/src/ripple/nodestore/impl/Shard.h @@ -21,7 +21,7 @@ #define RIPPLE_NODESTORE_SHARD_H_INCLUDED #include -#include +#include #include #include #include diff --git a/src/ripple/overlay/impl/OverlayImpl.cpp b/src/ripple/overlay/impl/OverlayImpl.cpp index 47e03a76b19..6ed046f0403 100644 --- a/src/ripple/overlay/impl/OverlayImpl.cpp +++ b/src/ripple/overlay/impl/OverlayImpl.cpp @@ -22,8 +22,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include diff --git a/src/ripple/overlay/impl/PeerImp.cpp b/src/ripple/overlay/impl/PeerImp.cpp index bc379c14725..60870c90a15 100644 --- 
a/src/ripple/overlay/impl/PeerImp.cpp +++ b/src/ripple/overlay/impl/PeerImp.cpp @@ -3404,7 +3404,7 @@ PeerImp::getLedger(std::shared_ptr const& m) } else { - JLOG(p_journal_.warn()) << "getLedger: Unable to find ledger"; + JLOG(p_journal_.debug()) << "getLedger: Unable to find ledger"; } return ledger; diff --git a/src/ripple/overlay/impl/PeerReservationTable.cpp b/src/ripple/overlay/impl/PeerReservationTable.cpp index 6e88da123f3..6f39d12e99c 100644 --- a/src/ripple/overlay/impl/PeerReservationTable.cpp +++ b/src/ripple/overlay/impl/PeerReservationTable.cpp @@ -19,8 +19,8 @@ #include -#include -#include +#include +#include #include #include #include diff --git a/src/ripple/peerfinder/impl/StoreSqdb.h b/src/ripple/peerfinder/impl/StoreSqdb.h index 7d43136ae96..879bee83b6f 100644 --- a/src/ripple/peerfinder/impl/StoreSqdb.h +++ b/src/ripple/peerfinder/impl/StoreSqdb.h @@ -20,7 +20,7 @@ #ifndef RIPPLE_PEERFINDER_STORESQDB_H_INCLUDED #define RIPPLE_PEERFINDER_STORESQDB_H_INCLUDED -#include +#include #include #include #include diff --git a/src/ripple/proto/org/xrpl/rpc/v1/common.proto b/src/ripple/proto/org/xrpl/rpc/v1/common.proto index 81718b507cf..fd514cbacee 100644 --- a/src/ripple/proto/org/xrpl/rpc/v1/common.proto +++ b/src/ripple/proto/org/xrpl/rpc/v1/common.proto @@ -373,6 +373,12 @@ message RootIndex bytes value = 1; } +message WalletLocator +{ + // 32 bytes + bytes value = 1; +} + // *** Messages wrapping variable length byte arrays *** @@ -586,6 +592,8 @@ message SignerEntry Account account = 1; SignerWeight signer_weight = 2; + + WalletLocator wallet_locator = 3; } // Next field: 3 diff --git a/src/ripple/protocol/Feature.h b/src/ripple/protocol/Feature.h index 9087bec992e..b3ecb099bcc 100644 --- a/src/ripple/protocol/Feature.h +++ b/src/ripple/protocol/Feature.h @@ -74,7 +74,7 @@ namespace detail { // Feature.cpp. Because it's only used to reserve storage, and determine how // large to make the FeatureBitset, it MAY be larger. It MUST NOT be less than // the actual number of amendments. A LogicError on startup will verify this. -static constexpr std::size_t numFeatures = 47; +static constexpr std::size_t numFeatures = 48; /** Amendments that this server supports and the default voting behavior. Whether they are enabled depends on the Rules defined in the validated @@ -334,6 +334,8 @@ extern uint256 const fixSTAmountCanonicalize; extern uint256 const fixRmSmallIncreasedQOffers; extern uint256 const featureCheckCashMakesTrustLine; extern uint256 const featureNonFungibleTokensV1; +extern uint256 const featureExpandedSignerList; +extern uint256 const fixNFTokenDirV1; } // namespace ripple diff --git a/src/ripple/protocol/Rules.h b/src/ripple/protocol/Rules.h new file mode 100644 index 00000000000..d8190e86a71 --- /dev/null +++ b/src/ripple/protocol/Rules.h @@ -0,0 +1,86 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +*/ +//============================================================================== + +#ifndef RIPPLE_LEDGER_RULES_H_INCLUDED +#define RIPPLE_LEDGER_RULES_H_INCLUDED + +#include +#include +#include +#include + +namespace ripple { + +class DigestAwareReadView; + +/** Rules controlling protocol behavior. */ +class Rules +{ +private: + class Impl; + + // Carrying impl by shared_ptr makes Rules comparatively cheap to pass + // by value. + std::shared_ptr impl_; + +public: + Rules(Rules const&) = default; + + Rules& + operator=(Rules const&) = default; + + Rules() = delete; + + /** Construct an empty rule set. + + These are the rules reflected by + the genesis ledger. + */ + explicit Rules(std::unordered_set> const& presets); + +private: + // Allow a friend function to construct Rules. + friend Rules + makeRulesGivenLedger( + DigestAwareReadView const& ledger, + std::unordered_set> const& presets); + + Rules( + std::unordered_set> const& presets, + std::optional const& digest, + STVector256 const& amendments); + +public: + /** Returns `true` if a feature is enabled. */ + bool + enabled(uint256 const& feature) const; + + /** Returns `true` if two rule sets are identical. + + @note This is for diagnostics. + */ + bool + operator==(Rules const&) const; + + bool + operator!=(Rules const& other) const; +}; + +} // namespace ripple +#endif diff --git a/src/ripple/protocol/STTx.h b/src/ripple/protocol/STTx.h index ca33abf8acd..c6a9e053c3d 100644 --- a/src/ripple/protocol/STTx.h +++ b/src/ripple/protocol/STTx.h @@ -21,7 +21,9 @@ #define RIPPLE_PROTOCOL_STTX_H_INCLUDED #include +#include #include +#include #include #include #include @@ -47,7 +49,16 @@ class STTx final : public STObject, public CountedObject public: static std::size_t const minMultiSigners = 1; - static std::size_t const maxMultiSigners = 8; + + // if rules are not supplied then the largest possible value is returned + static std::size_t + maxMultiSigners(Rules const* rules = 0) + { + if (rules && !rules->enabled(featureExpandedSignerList)) + return 8; + + return 32; + } STTx() = delete; STTx(STTx const& other) = default; @@ -108,7 +119,8 @@ class STTx final : public STObject, public CountedObject */ enum class RequireFullyCanonicalSig : bool { no, yes }; Expected - checkSign(RequireFullyCanonicalSig requireCanonicalSig) const; + checkSign(RequireFullyCanonicalSig requireCanonicalSig, Rules const& rules) + const; // SQL Functions with metadata. 
static std::string const& @@ -130,7 +142,9 @@ class STTx final : public STObject, public CountedObject checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const; Expected - checkMultiSign(RequireFullyCanonicalSig requireCanonicalSig) const; + checkMultiSign( + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const; STBase* copy(std::size_t n, void* buf) const override; diff --git a/src/ripple/protocol/impl/BuildInfo.cpp b/src/ripple/protocol/impl/BuildInfo.cpp index 6cd3668dde5..fc32e65c4ca 100644 --- a/src/ripple/protocol/impl/BuildInfo.cpp +++ b/src/ripple/protocol/impl/BuildInfo.cpp @@ -33,7 +33,7 @@ namespace BuildInfo { // and follow the format described at http://semver.org/ //------------------------------------------------------------------------------ // clang-format off -char const* const versionString = "1.9.0" +char const* const versionString = "1.9.1-b1" // clang-format on #if defined(DEBUG) || defined(SANITIZER) diff --git a/src/ripple/protocol/impl/Feature.cpp b/src/ripple/protocol/impl/Feature.cpp index d713dc8c43b..f6f67c003bc 100644 --- a/src/ripple/protocol/impl/Feature.cpp +++ b/src/ripple/protocol/impl/Feature.cpp @@ -438,6 +438,8 @@ REGISTER_FIX (fixSTAmountCanonicalize, Supported::yes, DefaultVote::yes REGISTER_FIX (fixRmSmallIncreasedQOffers, Supported::yes, DefaultVote::yes); REGISTER_FEATURE(CheckCashMakesTrustLine, Supported::yes, DefaultVote::no); REGISTER_FEATURE(NonFungibleTokensV1, Supported::yes, DefaultVote::no); +REGISTER_FEATURE(ExpandedSignerList, Supported::yes, DefaultVote::no); +REGISTER_FIX (fixNFTokenDirV1, Supported::yes, DefaultVote::no); // The following amendments have been active for at least two years. Their // pre-amendment code has been removed and the identifiers are deprecated. diff --git a/src/ripple/protocol/impl/InnerObjectFormats.cpp b/src/ripple/protocol/impl/InnerObjectFormats.cpp index c1b2acc87d2..1b4aa63c2ba 100644 --- a/src/ripple/protocol/impl/InnerObjectFormats.cpp +++ b/src/ripple/protocol/impl/InnerObjectFormats.cpp @@ -28,6 +28,7 @@ InnerObjectFormats::InnerObjectFormats() { {sfAccount, soeREQUIRED}, {sfSignerWeight, soeREQUIRED}, + {sfWalletLocator, soeOPTIONAL}, }); add(sfSigner.jsonName.c_str(), diff --git a/src/ripple/protocol/impl/Rules.cpp b/src/ripple/protocol/impl/Rules.cpp new file mode 100644 index 00000000000..3736764fcf9 --- /dev/null +++ b/src/ripple/protocol/impl/Rules.cpp @@ -0,0 +1,103 @@ +//------------------------------------------------------------------------------ +/* + This file is part of rippled: https://github.com/ripple/rippled + Copyright (c) 2012, 2013 Ripple Labs Inc. + + Permission to use, copy, modify, and/or distribute this software for any + purpose with or without fee is hereby granted, provided that the above + copyright notice and this permission notice appear in all copies. + + THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+*/ +//============================================================================== + +#include + +#include + +namespace ripple { + +class Rules::Impl +{ +private: + std::unordered_set> set_; + std::optional digest_; + std::unordered_set> const& presets_; + +public: + explicit Impl(std::unordered_set> const& presets) + : presets_(presets) + { + } + + Impl( + std::unordered_set> const& presets, + std::optional const& digest, + STVector256 const& amendments) + : presets_(presets) + { + digest_ = digest; + set_.reserve(amendments.size()); + set_.insert(amendments.begin(), amendments.end()); + } + + bool + enabled(uint256 const& feature) const + { + if (presets_.count(feature) > 0) + return true; + return set_.count(feature) > 0; + } + + bool + operator==(Impl const& other) const + { + if (!digest_ && !other.digest_) + return true; + if (!digest_ || !other.digest_) + return false; + return *digest_ == *other.digest_; + } +}; + +Rules::Rules(std::unordered_set> const& presets) + : impl_(std::make_shared(presets)) +{ +} + +Rules::Rules( + std::unordered_set> const& presets, + std::optional const& digest, + STVector256 const& amendments) + : impl_(std::make_shared(presets, digest, amendments)) +{ +} + +bool +Rules::enabled(uint256 const& feature) const +{ + assert(impl_); + return impl_->enabled(feature); +} + +bool +Rules::operator==(Rules const& other) const +{ + assert(impl_ && other.impl_); + if (impl_.get() == other.impl_.get()) + return true; + return *impl_ == *other.impl_; +} + +bool +Rules::operator!=(Rules const& other) const +{ + return !(*this == other); +} +} // namespace ripple diff --git a/src/ripple/protocol/impl/STTx.cpp b/src/ripple/protocol/impl/STTx.cpp index c8e05c74211..66d20f3167a 100644 --- a/src/ripple/protocol/impl/STTx.cpp +++ b/src/ripple/protocol/impl/STTx.cpp @@ -206,7 +206,9 @@ STTx::sign(PublicKey const& publicKey, SecretKey const& secretKey) } Expected -STTx::checkSign(RequireFullyCanonicalSig requireCanonicalSig) const +STTx::checkSign( + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const { try { @@ -214,8 +216,9 @@ STTx::checkSign(RequireFullyCanonicalSig requireCanonicalSig) const // at the SigningPubKey. If it's empty we must be // multi-signing. Otherwise we're single-signing. Blob const& signingPubKey = getFieldVL(sfSigningPubKey); - return signingPubKey.empty() ? checkMultiSign(requireCanonicalSig) - : checkSingleSign(requireCanonicalSig); + return signingPubKey.empty() + ? checkMultiSign(requireCanonicalSig, rules) + : checkSingleSign(requireCanonicalSig); } catch (std::exception const&) { @@ -327,7 +330,9 @@ STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const } Expected -STTx::checkMultiSign(RequireFullyCanonicalSig requireCanonicalSig) const +STTx::checkMultiSign( + RequireFullyCanonicalSig requireCanonicalSig, + Rules const& rules) const { // Make sure the MultiSigners are present. Otherwise they are not // attempting multi-signing and we just have a bad SigningPubKey. @@ -342,7 +347,8 @@ STTx::checkMultiSign(RequireFullyCanonicalSig requireCanonicalSig) const STArray const& signers{getFieldArray(sfSigners)}; // There are well known bounds that the number of signers must be within. 
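    // A minimal sketch of the bound that maxMultiSigners(&rules) resolves to
    // (see its definition in STTx.h above): 8 until the ExpandedSignerList
    // amendment is enabled, 32 afterwards (and 32 when no Rules are given):
    //
    //     std::size_t const upperBound =
    //         rules.enabled(featureExpandedSignerList) ? 32 : 8;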
- if (signers.size() < minMultiSigners || signers.size() > maxMultiSigners) + if (signers.size() < minMultiSigners || + signers.size() > maxMultiSigners(&rules)) return Unexpected("Invalid Signers array size."); // We can ease the computational load inside the loop a bit by diff --git a/src/ripple/rpc/handlers/AccountObjects.cpp b/src/ripple/rpc/handlers/AccountObjects.cpp index 55fe4e4136b..4dcb3aba7de 100644 --- a/src/ripple/rpc/handlers/AccountObjects.cpp +++ b/src/ripple/rpc/handlers/AccountObjects.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -101,21 +102,41 @@ doAccountNFTs(RPC::JsonContext& context) auto& nfts = (result[jss::account_nfts] = Json::arrayValue); // Continue iteration from the current page: - + bool pastMarker = marker.isZero(); + uint256 const maskedMarker = marker & nft::pageMask; while (cp) { auto arr = cp->getFieldArray(sfNFTokens); for (auto const& o : arr) { - if (o.getFieldH256(sfNFTokenID) <= marker) + // Scrolling past the marker gets weird. We need to look at + // a couple of conditions. + // + // 1. If the low 96-bits don't match, then we compare only + // against the low 96-bits, since that's what determines + // the sort order of the pages. + // + // 2. However, within one page there can be a number of + // NFTokenIDs that all have the same low 96 bits. If we're + // in that case then we need to compare against the full + // 256 bits. + uint256 const nftokenID = o[sfNFTokenID]; + uint256 const maskedNftokenID = nftokenID & nft::pageMask; + + if (!pastMarker && maskedNftokenID < maskedMarker) + continue; + + if (!pastMarker && maskedNftokenID == maskedMarker && + nftokenID <= marker) continue; + pastMarker = true; + { Json::Value& obj = nfts.append(o.getJson(JsonOptions::none)); // Pull out the components of the nft ID. 
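            // Restating the marker handling above as a single predicate: an
            // entry is still behind the marker, and therefore skipped, when
            //
            //     !pastMarker &&
            //         (maskedNftokenID < maskedMarker ||
            //          (maskedNftokenID == maskedMarker && nftokenID <= marker))
            //
            // Pages order by the low 96 bits (nft::pageMask); the full
            // 256-bit NFTokenID only breaks ties within that ordering.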
- uint256 const nftokenID = o[sfNFTokenID]; obj[sfFlags.jsonName] = nft::getFlags(nftokenID); obj[sfIssuer.jsonName] = to_string(nft::getIssuer(nftokenID)); obj[sfNFTokenTaxon.jsonName] = diff --git a/src/ripple/rpc/handlers/AccountTx.cpp b/src/ripple/rpc/handlers/AccountTx.cpp index e383b66feed..e5e7d54382a 100644 --- a/src/ripple/rpc/handlers/AccountTx.cpp +++ b/src/ripple/rpc/handlers/AccountTx.cpp @@ -21,8 +21,8 @@ #include #include #include -#include -#include +#include +#include #include #include #include @@ -42,14 +42,14 @@ namespace ripple { -using TxnsData = RelationalDBInterface::AccountTxs; -using TxnsDataBinary = RelationalDBInterface::MetaTxsList; -using TxnDataBinary = RelationalDBInterface::txnMetaLedgerType; -using AccountTxArgs = RelationalDBInterface::AccountTxArgs; -using AccountTxResult = RelationalDBInterface::AccountTxResult; +using TxnsData = RelationalDatabase::AccountTxs; +using TxnsDataBinary = RelationalDatabase::MetaTxsList; +using TxnDataBinary = RelationalDatabase::txnMetaLedgerType; +using AccountTxArgs = RelationalDatabase::AccountTxArgs; +using AccountTxResult = RelationalDatabase::AccountTxResult; -using LedgerShortcut = RelationalDBInterface::LedgerShortcut; -using LedgerSpecifier = RelationalDBInterface::LedgerSpecifier; +using LedgerShortcut = RelationalDatabase::LedgerShortcut; +using LedgerSpecifier = RelationalDatabase::LedgerSpecifier; // parses args into a ledger specifier, or returns a grpc status object on error std::variant, grpc::Status> @@ -257,9 +257,15 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) { context.loadType = Resource::feeMediumBurdenRPC; if (context.app.config().reporting()) - return dynamic_cast( - &context.app.getRelationalDBInterface()) - ->getAccountTx(args); + { + auto const db = dynamic_cast( + &context.app.getRelationalDatabase()); + + if (!db) + Throw("Failed to get relational database"); + + return db->getAccountTx(args); + } AccountTxResult result; @@ -274,7 +280,7 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) result.marker = args.marker; - RelationalDBInterface::AccountTxPageOptions options = { + RelationalDatabase::AccountTxPageOptions options = { args.account, result.ledgerRange.min, result.ledgerRange.max, @@ -282,21 +288,23 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) args.limit, isUnlimited(context.role)}; + auto const db = + dynamic_cast(&context.app.getRelationalDatabase()); + + if (!db) + Throw("Failed to get relational database"); + if (args.binary) { if (args.forward) { - auto [tx, marker] = dynamic_cast( - &context.app.getRelationalDBInterface()) - ->oldestAccountTxPageB(options); + auto [tx, marker] = db->oldestAccountTxPageB(options); result.transactions = tx; result.marker = marker; } else { - auto [tx, marker] = dynamic_cast( - &context.app.getRelationalDBInterface()) - ->newestAccountTxPageB(options); + auto [tx, marker] = db->newestAccountTxPageB(options); result.transactions = tx; result.marker = marker; } @@ -305,17 +313,13 @@ doAccountTxHelp(RPC::Context& context, AccountTxArgs const& args) { if (args.forward) { - auto [tx, marker] = dynamic_cast( - &context.app.getRelationalDBInterface()) - ->oldestAccountTxPage(options); + auto [tx, marker] = db->oldestAccountTxPage(options); result.transactions = tx; result.marker = marker; } else { - auto [tx, marker] = dynamic_cast( - &context.app.getRelationalDBInterface()) - ->newestAccountTxPage(options); + auto [tx, marker] = db->newestAccountTxPage(options); result.transactions = tx; 
result.marker = marker; } diff --git a/src/ripple/rpc/handlers/AccountTxOld.cpp b/src/ripple/rpc/handlers/AccountTxOld.cpp index 9c5bb0bcebb..1bc64247c36 100644 --- a/src/ripple/rpc/handlers/AccountTxOld.cpp +++ b/src/ripple/rpc/handlers/AccountTxOld.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -152,7 +152,7 @@ doAccountTxOld(RPC::JsonContext& context) ret[jss::account] = context.app.accountIDCache().toBase58(*raAccount); Json::Value& jvTxns = (ret[jss::transactions] = Json::arrayValue); - RelationalDBInterface::AccountTxOptions options = { + RelationalDatabase::AccountTxOptions options = { *raAccount, uLedgerMin, uLedgerMax, @@ -162,15 +162,15 @@ doAccountTxOld(RPC::JsonContext& context) if (bBinary) { - std::vector txns; + std::vector txns; if (bDescending) - txns = dynamic_cast( - &context.app.getRelationalDBInterface()) + txns = dynamic_cast( + &context.app.getRelationalDatabase()) ->getNewestAccountTxsB(options); else - txns = dynamic_cast( - &context.app.getRelationalDBInterface()) + txns = dynamic_cast( + &context.app.getRelationalDatabase()) ->getOldestAccountTxsB(options); for (auto it = txns.begin(), end = txns.end(); it != end; ++it) @@ -189,15 +189,15 @@ doAccountTxOld(RPC::JsonContext& context) } else { - RelationalDBInterface::AccountTxs txns; + RelationalDatabase::AccountTxs txns; if (bDescending) - txns = dynamic_cast( - &context.app.getRelationalDBInterface()) + txns = dynamic_cast( + &context.app.getRelationalDatabase()) ->getNewestAccountTxs(options); else - txns = dynamic_cast( - &context.app.getRelationalDBInterface()) + txns = dynamic_cast( + &context.app.getRelationalDatabase()) ->getOldestAccountTxs(options); for (auto it = txns.begin(), end = txns.end(); it != end; ++it) diff --git a/src/ripple/rpc/handlers/GetCounts.cpp b/src/ripple/rpc/handlers/GetCounts.cpp index acb306449df..cf3e7290202 100644 --- a/src/ripple/rpc/handlers/GetCounts.cpp +++ b/src/ripple/rpc/handlers/GetCounts.cpp @@ -22,7 +22,7 @@ #include #include #include -#include +#include #include #include #include @@ -75,23 +75,23 @@ getCountsJson(Application& app, int minObjectCount) if (!app.config().reporting() && app.config().useTxTables()) { - auto dbKB = dynamic_cast( - &app.getRelationalDBInterface()) - ->getKBUsedAll(); + auto const db = + dynamic_cast(&app.getRelationalDatabase()); + + if (!db) + Throw("Failed to get relational database"); + + auto dbKB = db->getKBUsedAll(); if (dbKB > 0) ret[jss::dbKBTotal] = dbKB; - dbKB = dynamic_cast( - &app.getRelationalDBInterface()) - ->getKBUsedLedger(); + dbKB = db->getKBUsedLedger(); if (dbKB > 0) ret[jss::dbKBLedger] = dbKB; - dbKB = dynamic_cast( - &app.getRelationalDBInterface()) - ->getKBUsedTransaction(); + dbKB = db->getKBUsedTransaction(); if (dbKB > 0) ret[jss::dbKBTransaction] = dbKB; diff --git a/src/ripple/rpc/handlers/TxHistory.cpp b/src/ripple/rpc/handlers/TxHistory.cpp index 7fa7fc76f9b..4c76bfac026 100644 --- a/src/ripple/rpc/handlers/TxHistory.cpp +++ b/src/ripple/rpc/handlers/TxHistory.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include @@ -54,8 +54,7 @@ doTxHistory(RPC::JsonContext& context) if ((startIndex > 10000) && (!isUnlimited(context.role))) return rpcError(rpcNO_PERMISSION); - auto trans = - context.app.getRelationalDBInterface().getTxHistory(startIndex); + auto trans = context.app.getRelationalDatabase().getTxHistory(startIndex); Json::Value obj; Json::Value& txs = obj[jss::txs]; diff --git a/src/ripple/rpc/impl/GRPCHelpers.cpp 
b/src/ripple/rpc/impl/GRPCHelpers.cpp index 558c9d53566..e06512ce2c8 100644 --- a/src/ripple/rpc/impl/GRPCHelpers.cpp +++ b/src/ripple/rpc/impl/GRPCHelpers.cpp @@ -746,6 +746,14 @@ populateSignerListID(T& to, STObject const& from) [&to]() { return to.mutable_signer_list_id(); }, from, sfSignerListID); } +template +void +populateWalletLocator(T& to, STObject const& from) +{ + populateProtoPrimitive( + [&to]() { return to.mutable_wallet_locator(); }, from, sfWalletLocator); +} + template void populateTicketSequence(T& to, STObject const& from) @@ -1012,6 +1020,7 @@ populateSignerEntries(T& to, STObject const& from) [](auto& innerObj, auto& innerProto) { populateAccount(innerProto, innerObj); populateSignerWeight(innerProto, innerObj); + populateWalletLocator(innerProto, innerObj); }, from, sfSignerEntries, diff --git a/src/ripple/rpc/impl/RPCHelpers.cpp b/src/ripple/rpc/impl/RPCHelpers.cpp index 499f12323f3..6958ce9d9bb 100644 --- a/src/ripple/rpc/impl/RPCHelpers.cpp +++ b/src/ripple/rpc/impl/RPCHelpers.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -537,7 +537,7 @@ isValidated( { assert(hash->isNonZero()); uint256 valHash = - app.getRelationalDBInterface().getHashByIndex(seq); + app.getRelationalDatabase().getHashByIndex(seq); if (valHash == ledger.info().hash) { // SQL database doesn't match ledger chain @@ -696,19 +696,31 @@ parseRippleLibSeed(Json::Value const& value) std::optional getSeedFromRPC(Json::Value const& params, Json::Value& error) { - // The array should be constexpr, but that makes Visual Studio unhappy. - static char const* const seedTypes[]{ - jss::passphrase.c_str(), jss::seed.c_str(), jss::seed_hex.c_str()}; + using string_to_seed_t = + std::function(std::string const&)>; + using seed_match_t = std::pair; + + static seed_match_t const seedTypes[]{ + {jss::passphrase.c_str(), + [](std::string const& s) { return parseGenericSeed(s); }}, + {jss::seed.c_str(), + [](std::string const& s) { return parseBase58(s); }}, + {jss::seed_hex.c_str(), [](std::string const& s) { + uint128 i; + if (i.parseHex(s)) + return std::optional(Slice(i.data(), i.size())); + return std::optional{}; + }}}; // Identify which seed type is in use. - char const* seedType = nullptr; + seed_match_t const* seedType = nullptr; int count = 0; - for (auto t : seedTypes) + for (auto const& t : seedTypes) { - if (params.isMember(t)) + if (params.isMember(t.first)) { ++count; - seedType = t; + seedType = &t; } } @@ -722,28 +734,17 @@ getSeedFromRPC(Json::Value const& params, Json::Value& error) } // Make sure a string is present - if (!params[seedType].isString()) + auto const& param = params[seedType->first]; + if (!param.isString()) { - error = RPC::expected_field_error(seedType, "string"); + error = RPC::expected_field_error(seedType->first, "string"); return std::nullopt; } - auto const fieldContents = params[seedType].asString(); + auto const fieldContents = param.asString(); // Convert string to seed. 
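    // With the table above, each recognized field name ("passphrase",
    // "seed", "seed_hex") carries its own parser, so the conversion below is
    // a single call through seedType->second rather than a chain of string
    // compares. A hypothetical request supplying a hex seed might look like
    // (the value is illustrative only):
    //
    //     { "seed_hex": "0102030405060708090A0B0C0D0E0F10" }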
- std::optional seed; - - if (seedType == jss::seed.c_str()) - seed = parseBase58(fieldContents); - else if (seedType == jss::passphrase.c_str()) - seed = parseGenericSeed(fieldContents); - else if (seedType == jss::seed_hex.c_str()) - { - uint128 s; - - if (s.parseHex(fieldContents)) - seed.emplace(Slice(s.data(), s.size())); - } + std::optional seed = seedType->second(fieldContents); if (!seed) error = rpcError(rpcBAD_SEED); @@ -757,7 +758,6 @@ keypairForSignature(Json::Value const& params, Json::Value& error) bool const has_key_type = params.isMember(jss::key_type); // All of the secret types we allow, but only one at a time. - // The array should be constexpr, but that makes Visual Studio unhappy. static char const* const secretTypes[]{ jss::passphrase.c_str(), jss::secret.c_str(), @@ -811,7 +811,9 @@ keypairForSignature(Json::Value const& params, Json::Value& error) return {}; } - if (secretType == jss::secret.c_str()) + // using strcmp as pointers may not match (see + // https://developercommunity.visualstudio.com/t/assigning-constexpr-char--to-static-cha/10021357?entry=problem) + if (strcmp(secretType, jss::secret.c_str()) == 0) { error = RPC::make_param_error( "The secret field is not allowed if " + @@ -823,7 +825,9 @@ keypairForSignature(Json::Value const& params, Json::Value& error) // ripple-lib encodes seed used to generate an Ed25519 wallet in a // non-standard way. While we never encode seeds that way, we try // to detect such keys to avoid user confusion. - if (secretType != jss::seed_hex.c_str()) + // using strcmp as pointers may not match (see + // https://developercommunity.visualstudio.com/t/assigning-constexpr-char--to-static-cha/10021357?entry=problem) + if (strcmp(secretType, jss::seed_hex.c_str()) != 0) { seed = RPC::parseRippleLibSeed(params[secretType]); diff --git a/src/ripple/rpc/impl/ShardArchiveHandler.cpp b/src/ripple/rpc/impl/ShardArchiveHandler.cpp index efa422c8bf5..5e5635475e1 100644 --- a/src/ripple/rpc/impl/ShardArchiveHandler.cpp +++ b/src/ripple/rpc/impl/ShardArchiveHandler.cpp @@ -18,7 +18,7 @@ //============================================================================== #include -#include +#include #include #include #include diff --git a/src/test/app/Manifest_test.cpp b/src/test/app/Manifest_test.cpp index 47b16d94883..db66e09e518 100644 --- a/src/test/app/Manifest_test.cpp +++ b/src/test/app/Manifest_test.cpp @@ -20,7 +20,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/src/test/app/MultiSign_test.cpp b/src/test/app/MultiSign_test.cpp index 6e9278f51da..433cf2f7cd8 100644 --- a/src/test/app/MultiSign_test.cpp +++ b/src/test/app/MultiSign_test.cpp @@ -34,6 +34,30 @@ class MultiSign_test : public beast::unit_test::suite jtx::Account const phase{"phase", KeyType::ed25519}; jtx::Account const shade{"shade", KeyType::secp256k1}; jtx::Account const spook{"spook", KeyType::ed25519}; + jtx::Account const acc10{"acc10", KeyType::ed25519}; + jtx::Account const acc11{"acc11", KeyType::ed25519}; + jtx::Account const acc12{"acc12", KeyType::ed25519}; + jtx::Account const acc13{"acc13", KeyType::ed25519}; + jtx::Account const acc14{"acc14", KeyType::ed25519}; + jtx::Account const acc15{"acc15", KeyType::ed25519}; + jtx::Account const acc16{"acc16", KeyType::ed25519}; + jtx::Account const acc17{"acc17", KeyType::ed25519}; + jtx::Account const acc18{"acc18", KeyType::ed25519}; + jtx::Account const acc19{"acc19", KeyType::ed25519}; + jtx::Account const acc20{"acc20", KeyType::ed25519}; + jtx::Account const acc21{"acc21", 
KeyType::ed25519}; + jtx::Account const acc22{"acc22", KeyType::ed25519}; + jtx::Account const acc23{"acc23", KeyType::ed25519}; + jtx::Account const acc24{"acc24", KeyType::ed25519}; + jtx::Account const acc25{"acc25", KeyType::ed25519}; + jtx::Account const acc26{"acc26", KeyType::ed25519}; + jtx::Account const acc27{"acc27", KeyType::ed25519}; + jtx::Account const acc28{"acc28", KeyType::ed25519}; + jtx::Account const acc29{"acc29", KeyType::ed25519}; + jtx::Account const acc30{"acc30", KeyType::ed25519}; + jtx::Account const acc31{"acc31", KeyType::ed25519}; + jtx::Account const acc32{"acc32", KeyType::ed25519}; + jtx::Account const acc33{"acc33", KeyType::ed25519}; public: void @@ -159,22 +183,30 @@ class MultiSign_test : public beast::unit_test::suite {spook, 1}}), ter(temBAD_QUORUM)); - // Make a signer list that's too big. Should fail. + // clang-format off + // Make a signer list that's too big. Should fail. (Even with + // ExpandedSignerList) Account const spare("spare", KeyType::secp256k1); env(signers( alice, 1, - {{bogie, 1}, - {demon, 1}, - {ghost, 1}, - {haunt, 1}, - {jinni, 1}, - {phase, 1}, - {shade, 1}, - {spook, 1}, - {spare, 1}}), + features[featureExpandedSignerList] + ? std::vector{{bogie, 1}, {demon, 1}, {ghost, 1}, + {haunt, 1}, {jinni, 1}, {phase, 1}, + {shade, 1}, {spook, 1}, {spare, 1}, + {acc10, 1}, {acc11, 1}, {acc12, 1}, + {acc13, 1}, {acc14, 1}, {acc15, 1}, + {acc16, 1}, {acc17, 1}, {acc18, 1}, + {acc19, 1}, {acc20, 1}, {acc21, 1}, + {acc22, 1}, {acc23, 1}, {acc24, 1}, + {acc25, 1}, {acc26, 1}, {acc27, 1}, + {acc28, 1}, {acc29, 1}, {acc30, 1}, + {acc31, 1}, {acc32, 1}, {acc33, 1}} + : std::vector{{bogie, 1}, {demon, 1}, {ghost, 1}, + {haunt, 1}, {jinni, 1}, {phase, 1}, + {shade, 1}, {spook, 1}, {spare, 1}}), ter(temMALFORMED)); - + // clang-format on env.close(); env.require(owners(alice, 0)); } @@ -1149,20 +1181,56 @@ class MultiSign_test : public beast::unit_test::suite "fails local checks: Invalid Signers array size."); } { - // Multisign 9 times should fail. + // Multisign 9 (!ExpandedSignerList) | 33 (ExpandedSignerList) times + // should fail. JTx tx = env.jt( noop(alice), fee(2 * baseFee), - msig( - bogie, - bogie, - bogie, - bogie, - bogie, - bogie, - bogie, - bogie, - bogie)); + + features[featureExpandedSignerList] ? 
msig( + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie) + : msig( + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie, + bogie)); STTx local = *(tx.stx); auto const info = submitSTTx(local); BEAST_EXPECT( @@ -1517,6 +1585,82 @@ class MultiSign_test : public beast::unit_test::suite BEAST_EXPECT(env.seq(alice) == aliceSeq); } + void + test_signersWithTags(FeatureBitset features) + { + if (!features[featureExpandedSignerList]) + return; + + testcase("Signers With Tags"); + + using namespace jtx; + Env env{*this, features}; + Account const alice{"alice", KeyType::ed25519}; + env.fund(XRP(1000), alice); + env.close(); + uint8_t tag1[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}; + + uint8_t tag2[] = + "hello world some ascii 32b long"; // including 1 byte for NUL + + uint256 bogie_tag = ripple::base_uint<256>::fromVoid(tag1); + uint256 demon_tag = ripple::base_uint<256>::fromVoid(tag2); + + // Attach phantom signers to alice and use them for a transaction. + env(signers(alice, 1, {{bogie, 1, bogie_tag}, {demon, 1, demon_tag}})); + env.close(); + env.require(owners(alice, features[featureMultiSignReserve] ? 1 : 4)); + + // This should work. + auto const baseFee = env.current()->fees().base; + std::uint32_t aliceSeq = env.seq(alice); + env(noop(alice), msig(bogie, demon), fee(3 * baseFee)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + + // Either signer alone should work. + aliceSeq = env.seq(alice); + env(noop(alice), msig(bogie), fee(2 * baseFee)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + + aliceSeq = env.seq(alice); + env(noop(alice), msig(demon), fee(2 * baseFee)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + + // Duplicate signers should fail. + aliceSeq = env.seq(alice); + env(noop(alice), msig(demon, demon), fee(3 * baseFee), ter(temINVALID)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq); + + // A non-signer should fail. + aliceSeq = env.seq(alice); + env(noop(alice), + msig(bogie, spook), + fee(3 * baseFee), + ter(tefBAD_SIGNATURE)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq); + + // Don't meet the quorum. Should fail. + env(signers(alice, 2, {{bogie, 1}, {demon, 1}})); + aliceSeq = env.seq(alice); + env(noop(alice), msig(bogie), fee(2 * baseFee), ter(tefBAD_QUORUM)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq); + + // Meet the quorum. Should succeed. + aliceSeq = env.seq(alice); + env(noop(alice), msig(bogie, demon), fee(3 * baseFee)); + env.close(); + BEAST_EXPECT(env.seq(alice) == aliceSeq + 1); + } + void testAll(FeatureBitset features) { @@ -1537,6 +1681,7 @@ class MultiSign_test : public beast::unit_test::suite test_multisigningMultisigner(features); test_signForHash(features); test_signersWithTickets(features); + test_signersWithTags(features); } void @@ -1545,10 +1690,13 @@ class MultiSign_test : public beast::unit_test::suite using namespace jtx; auto const all = supported_amendments(); - // The reserve required on a signer list changes based on. - // featureMultiSignReserve. Test both with and without. 
- testAll(all - featureMultiSignReserve); - testAll(all | featureMultiSignReserve); + // The reserve required on a signer list changes based on + // featureMultiSignReserve. Limits on the number of signers + // changes based on featureExpandedSignerList. Test both with and + // without. + testAll(all - featureMultiSignReserve - featureExpandedSignerList); + testAll(all - featureExpandedSignerList); + testAll(all); test_amendmentTransition(); } }; diff --git a/src/test/app/NFTokenBurn_test.cpp b/src/test/app/NFTokenBurn_test.cpp index e40f4c839a7..00124731cb9 100644 --- a/src/test/app/NFTokenBurn_test.cpp +++ b/src/test/app/NFTokenBurn_test.cpp @@ -595,8 +595,11 @@ class NFTokenBurn_test : public beast::unit_test::suite run() override { using namespace test::jtx; - auto const sa = supported_amendments(); - testWithFeats(sa); + FeatureBitset const all{supported_amendments()}; + FeatureBitset const fixNFTDir{fixNFTokenDirV1}; + + testWithFeats(all - fixNFTDir); + testWithFeats(all); } }; diff --git a/src/test/app/NFTokenDir_test.cpp b/src/test/app/NFTokenDir_test.cpp index c19a8d0790a..ae7eeeaf603 100644 --- a/src/test/app/NFTokenDir_test.cpp +++ b/src/test/app/NFTokenDir_test.cpp @@ -17,6 +17,7 @@ */ //============================================================================== +#include #include #include #include @@ -95,6 +96,63 @@ class NFTokenDir_test : public beast::unit_test::suite } } + void + testConsecutiveNFTs(FeatureBitset features) + { + // It should be possible to store many consecutive NFTs. + testcase("Sequential NFTs"); + + using namespace test::jtx; + Env env{*this, features}; + + // A single minter tends not to mint numerically sequential NFTokens + // because the taxon cipher mixes things up. We can override the + // cipher, however, and mint many sequential NFTokens with no gaps + // between them. + // + // Here we'll simply mint 100 sequential NFTs. Then we'll create + // offers for them to verify that the ledger can find them. + + Account const issuer{"issuer"}; + Account const buyer{"buyer"}; + env.fund(XRP(10000), buyer, issuer); + env.close(); + + // Mint 100 sequential NFTs. Tweak the taxon so zero is always stored. + // That's what makes them sequential. + constexpr std::size_t nftCount = 100; + std::vector nftIDs; + nftIDs.reserve(nftCount); + for (int i = 0; i < nftCount; ++i) + { + std::uint32_t taxon = + toUInt32(nft::cipheredTaxon(i, nft::toTaxon(0))); + nftIDs.emplace_back( + token::getNextID(env, issuer, taxon, tfTransferable)); + env(token::mint(issuer, taxon), txflags(tfTransferable)); + env.close(); + } + + // Create an offer for each of the NFTs. This verifies that the ledger + // can find all of the minted NFTs. + std::vector offers; + for (uint256 const& nftID : nftIDs) + { + offers.emplace_back(keylet::nftoffer(issuer, env.seq(issuer)).key); + env(token::createOffer(issuer, nftID, XRP(0)), + txflags((tfSellNFToken))); + env.close(); + } + + // Buyer accepts all of the offers in reverse order. + std::reverse(offers.begin(), offers.end()); + for (uint256 const& offer : offers) + { + env(token::acceptSellOffer(buyer, offer)); + env.close(); + } + } + void testLopsidedSplits(FeatureBitset features) { @@ -307,17 +365,224 @@ class NFTokenDir_test : public beast::unit_test::suite "sp6JS7f14BuwFY8Mw6xCigaMwC6Dp", // 32. 0x309b67ed }; - // FUTURE TEST + // Run the test cases. 
+ exerciseLopsided(splitAndAddToHi); + exerciseLopsided(splitAndAddToLo); + } + + void + testFixNFTokenDirV1(FeatureBitset features) + { + // Exercise a fix for an off-by-one in the creation of an NFTokenPage + // index. + testcase("fixNFTokenDirV1"); + + using namespace test::jtx; + + // When a single NFT page exceeds 32 entries, the code is inclined + // to split that page into two equal pieces. The new page is lower + // than the original. There was an off-by-one in the selection of + // the index for the new page. This test recreates the problem. + + // Lambda that exercises the split. + auto exerciseFixNFTokenDirV1 = + [this, + &features](std::initializer_list seeds) { + Env env{*this, features}; + + // Eventually all of the NFTokens will be owned by buyer. + Account const buyer{"buyer"}; + env.fund(XRP(10000), buyer); + env.close(); + + // Create accounts for all of the seeds and fund those accounts. + std::vector accounts; + accounts.reserve(seeds.size()); + for (std::string_view const& seed : seeds) + { + Account const& account = accounts.emplace_back( + Account::base58Seed, std::string(seed)); + env.fund(XRP(10000), account); + env.close(); + } + + // All of the accounts create one NFT and and offer that NFT to + // buyer. + std::vector nftIDs; + std::vector offers; + offers.reserve(accounts.size()); + for (Account const& account : accounts) + { + // Mint the NFT. + uint256 const& nftID = nftIDs.emplace_back( + token::getNextID(env, account, 0, tfTransferable)); + env(token::mint(account, 0), txflags(tfTransferable)); + env.close(); + + // Create an offer to give the NFT to buyer for free. + offers.emplace_back( + keylet::nftoffer(account, env.seq(account)).key); + env(token::createOffer(account, nftID, XRP(0)), + token::destination(buyer), + txflags((tfSellNFToken))); + } + env.close(); + + // buyer accepts all of the but the last. The last offer + // causes the page to split. + for (std::size_t i = 0; i < offers.size() - 1; ++i) + { + env(token::acceptSellOffer(buyer, offers[i])); + env.close(); + } + + // Here is the last offer. Without the fix accepting this + // offer causes tecINVARIANT_FAILED. With the fix the offer + // accept succeeds. + if (!features[fixNFTokenDirV1]) + { + env(token::acceptSellOffer(buyer, offers.back()), + ter(tecINVARIANT_FAILED)); + env.close(); + return; + } + env(token::acceptSellOffer(buyer, offers.back())); + env.close(); + + // This can be a good time to look at the NFT pages. + // printNFTPages(env, noisy); + + // Verify that all NFTs are owned by buyer and findable in the + // ledger by having buyer create sell offers for all of their + // NFTs. Attempting to sell an offer that the ledger can't find + // generates a non-tesSUCCESS error code. + for (uint256 const& nftID : nftIDs) + { + uint256 const offerID = + keylet::nftoffer(buyer, env.seq(buyer)).key; + env(token::createOffer(buyer, nftID, XRP(100)), + txflags(tfSellNFToken)); + env.close(); + + env(token::cancelOffer(buyer, {offerID})); + } + + // Verify that all the NFTs are owned by buyer. 
+ Json::Value buyerNFTs = [&env, &buyer]() { + Json::Value params; + params[jss::account] = buyer.human(); + params[jss::type] = "state"; + return env.rpc("json", "account_nfts", to_string(params)); + }(); + + BEAST_EXPECT( + buyerNFTs[jss::result][jss::account_nfts].size() == + nftIDs.size()); + for (Json::Value const& ownedNFT : + buyerNFTs[jss::result][jss::account_nfts]) + { + uint256 ownedID; + BEAST_EXPECT(ownedID.parseHex( + ownedNFT[sfNFTokenID.jsonName].asString())); + auto const foundIter = + std::find(nftIDs.begin(), nftIDs.end(), ownedID); + + // Assuming we find the NFT, erase it so we know it's been + // found and can't be found again. + if (BEAST_EXPECT(foundIter != nftIDs.end())) + nftIDs.erase(foundIter); + } + + // All NFTs should now be accounted for, so nftIDs should be + // empty. + BEAST_EXPECT(nftIDs.empty()); + }; + // These seeds fill the last 17 entries of the initial page with // equivalent NFTs. The split should keep these together. + static std::initializer_list const seventeenHi{ + // These 16 need to be kept together by the implementation. + "sp6JS7f14BuwFY8Mw5EYu5z86hKDL", // 0. 0x399187e9 + "sp6JS7f14BuwFY8Mw5PUAMwc5ygd7", // 1. 0x399187e9 + "sp6JS7f14BuwFY8Mw5R3xUBcLSeTs", // 2. 0x399187e9 + "sp6JS7f14BuwFY8Mw5W6oS5sdC3oF", // 3. 0x399187e9 + "sp6JS7f14BuwFY8Mw5pYc3D9iuLcw", // 4. 0x399187e9 + "sp6JS7f14BuwFY8Mw5pfGVnhcdp3b", // 5. 0x399187e9 + "sp6JS7f14BuwFY8Mw6jS6RdEqXqrN", // 6. 0x399187e9 + "sp6JS7f14BuwFY8Mw6krt6AKbvRXW", // 7. 0x399187e9 + "sp6JS7f14BuwFY8Mw6mnVBQq7cAN2", // 8. 0x399187e9 + "sp6JS7f14BuwFY8Mw8ECJxPjmkufQ", // 9. 0x399187e9 + "sp6JS7f14BuwFY8Mw8asgzcceGWYm", // 10. 0x399187e9 + "sp6JS7f14BuwFY8MwF6J3FXnPCgL8", // 11. 0x399187e9 + "sp6JS7f14BuwFY8MwFEud2w5czv5q", // 12. 0x399187e9 + "sp6JS7f14BuwFY8MwFNxKVqJnx8P5", // 13. 0x399187e9 + "sp6JS7f14BuwFY8MwFnTCXg3eRidL", // 14. 0x399187e9 + "sp6JS7f14BuwFY8Mwj47hv1vrDge6", // 15. 0x399187e9 + + // These 17 need to be kept together by the implementation. + "sp6JS7f14BuwFY8MwjJCwYr9zSfAv", // 16. 0xabb11898 + "sp6JS7f14BuwFY8MwjYa5yLkgCLuT", // 17. 0xabb11898 + "sp6JS7f14BuwFY8MwjenxuJ3TH2Bc", // 18. 0xabb11898 + "sp6JS7f14BuwFY8MwjriN7Ui11NzB", // 19. 0xabb11898 + "sp6JS7f14BuwFY8Mwk3AuoJNSEo34", // 20. 0xabb11898 + "sp6JS7f14BuwFY8MwkT36hnRv8hTo", // 21. 0xabb11898 + "sp6JS7f14BuwFY8MwkTQixEXfi1Cr", // 22. 0xabb11898 + "sp6JS7f14BuwFY8MwkYJaZM1yTJBF", // 23. 0xabb11898 + "sp6JS7f14BuwFY8Mwkc4k1uo85qp2", // 24. 0xabb11898 + "sp6JS7f14BuwFY8Mwkf7cFhF1uuxx", // 25. 0xabb11898 + "sp6JS7f14BuwFY8MwmCK2un99wb4e", // 26. 0xabb11898 + "sp6JS7f14BuwFY8MwmETztNHYu2Bx", // 27. 0xabb11898 + "sp6JS7f14BuwFY8MwmJws9UwRASfR", // 28. 0xabb11898 + "sp6JS7f14BuwFY8MwoH5PQkGK8tEb", // 29. 0xabb11898 + "sp6JS7f14BuwFY8MwoVXtP2yCzjJV", // 30. 0xabb11898 + "sp6JS7f14BuwFY8MwobxRXA9vsTeX", // 31. 0xabb11898 + "sp6JS7f14BuwFY8Mwos3pc5Gb3ihU", // 32. 0xabb11898 + }; - // FUTURE TEST // These seeds fill the first entries of the initial page with // equivalent NFTs. The split should keep these together. + static std::initializer_list const seventeenLo{ + // These 17 need to be kept together by the implementation. + "sp6JS7f14BuwFY8Mw5EYu5z86hKDL", // 0. 0x399187e9 + "sp6JS7f14BuwFY8Mw5PUAMwc5ygd7", // 1. 0x399187e9 + "sp6JS7f14BuwFY8Mw5R3xUBcLSeTs", // 2. 0x399187e9 + "sp6JS7f14BuwFY8Mw5W6oS5sdC3oF", // 3. 0x399187e9 + "sp6JS7f14BuwFY8Mw5pYc3D9iuLcw", // 4. 0x399187e9 + "sp6JS7f14BuwFY8Mw5pfGVnhcdp3b", // 5. 0x399187e9 + "sp6JS7f14BuwFY8Mw6jS6RdEqXqrN", // 6. 0x399187e9 + "sp6JS7f14BuwFY8Mw6krt6AKbvRXW", // 7. 
0x399187e9 + "sp6JS7f14BuwFY8Mw6mnVBQq7cAN2", // 8. 0x399187e9 + "sp6JS7f14BuwFY8Mw8ECJxPjmkufQ", // 9. 0x399187e9 + "sp6JS7f14BuwFY8Mw8asgzcceGWYm", // 10. 0x399187e9 + "sp6JS7f14BuwFY8MwF6J3FXnPCgL8", // 11. 0x399187e9 + "sp6JS7f14BuwFY8MwFEud2w5czv5q", // 12. 0x399187e9 + "sp6JS7f14BuwFY8MwFNxKVqJnx8P5", // 13. 0x399187e9 + "sp6JS7f14BuwFY8MwFnTCXg3eRidL", // 14. 0x399187e9 + "sp6JS7f14BuwFY8Mwj47hv1vrDge6", // 15. 0x399187e9 + "sp6JS7f14BuwFY8Mwj6TYekeeyukh", // 16. 0x399187e9 + + // These 16 need to be kept together by the implementation. + "sp6JS7f14BuwFY8MwjYa5yLkgCLuT", // 17. 0xabb11898 + "sp6JS7f14BuwFY8MwjenxuJ3TH2Bc", // 18. 0xabb11898 + "sp6JS7f14BuwFY8MwjriN7Ui11NzB", // 19. 0xabb11898 + "sp6JS7f14BuwFY8Mwk3AuoJNSEo34", // 20. 0xabb11898 + "sp6JS7f14BuwFY8MwkT36hnRv8hTo", // 21. 0xabb11898 + "sp6JS7f14BuwFY8MwkTQixEXfi1Cr", // 22. 0xabb11898 + "sp6JS7f14BuwFY8MwkYJaZM1yTJBF", // 23. 0xabb11898 + "sp6JS7f14BuwFY8Mwkc4k1uo85qp2", // 24. 0xabb11898 + "sp6JS7f14BuwFY8Mwkf7cFhF1uuxx", // 25. 0xabb11898 + "sp6JS7f14BuwFY8MwmCK2un99wb4e", // 26. 0xabb11898 + "sp6JS7f14BuwFY8MwmETztNHYu2Bx", // 27. 0xabb11898 + "sp6JS7f14BuwFY8MwmJws9UwRASfR", // 28. 0xabb11898 + "sp6JS7f14BuwFY8MwoH5PQkGK8tEb", // 29. 0xabb11898 + "sp6JS7f14BuwFY8MwoVXtP2yCzjJV", // 30. 0xabb11898 + "sp6JS7f14BuwFY8MwobxRXA9vsTeX", // 31. 0xabb11898 + "sp6JS7f14BuwFY8Mwos3pc5Gb3ihU", // 32. 0xabb11898 + }; // Run the test cases. - exerciseLopsided(splitAndAddToHi); - exerciseLopsided(splitAndAddToLo); + exerciseFixNFTokenDirV1(seventeenHi); + exerciseFixNFTokenDirV1(seventeenLo); } void @@ -338,23 +603,40 @@ class NFTokenDir_test : public beast::unit_test::suite // Here are 33 seeds that produce identical low 32-bits in their // corresponding AccountIDs. - // - // NOTE: We've not yet identified 33 AccountIDs that meet the - // requirements. At the moment 12 is the best we can do. We'll fill - // in the full count when they are available. static std::initializer_list const seeds{ - "sp6JS7f14BuwFY8Mw5G5vCrbxB3TZ", - "sp6JS7f14BuwFY8Mw5H6qyXhorcip", - "sp6JS7f14BuwFY8Mw5suWxsBQRqLx", - "sp6JS7f14BuwFY8Mw66gtwamvGgSg", - "sp6JS7f14BuwFY8Mw66iNV4PPcmyt", - "sp6JS7f14BuwFY8Mw68Qz2P58ybfE", - "sp6JS7f14BuwFY8Mw6AYtLXKzi2Bo", - "sp6JS7f14BuwFY8Mw6boCES4j62P2", - "sp6JS7f14BuwFY8Mw6kv7QDDv7wjw", - "sp6JS7f14BuwFY8Mw6mHXMvpBjjwg", - "sp6JS7f14BuwFY8Mw6qfGbznyYvVp", - "sp6JS7f14BuwFY8Mw6zg6qHKDfSoU", + "sp6JS7f14BuwFY8Mw5FnqmbciPvH6", // 0. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw5MBGbyMSsXLp", // 1. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw5S4PnDyBdKKm", // 2. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw6kcXpM2enE35", // 3. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw6tuuSMMwyJ44", // 4. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw8E8JWLQ1P8pt", // 5. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw8WwdgWkCHhEx", // 6. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw8XDUYvU6oGhQ", // 7. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw8ceVGL4M1zLQ", // 8. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw8fdSwLCZWDFd", // 9. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mw8zuF6Fg65i1E", // 10. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwF2k7bihVfqes", // 11. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwF6X24WXGn557", // 12. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwFMpn7strjekg", // 13. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwFSdy9sYVrwJs", // 14. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwFdMcLy9UkrXn", // 15. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwFdbwFm1AAboa", // 16. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwFdr5AhKThVtU", // 17. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwjFc3Q9YatvAw", // 18. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwjRXcNs1ozEXn", // 19. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwkQGUKL7v1FBt", // 20. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mwkamsoxx1wECt", // 21. 
0x9a8ebed3 + "sp6JS7f14BuwFY8Mwm3hus1dG6U8y", // 22. 0x9a8ebed3 + "sp6JS7f14BuwFY8Mwm589M8vMRpXF", // 23. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwmJTRJ4Fqz1A3", // 24. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwmRfy8fer4QbL", // 25. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwmkkFx1HtgWRx", // 26. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwmwP9JFdKa4PS", // 27. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwoXWJLB3ciHfo", // 28. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwoYc1gTtT2mWL", // 29. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwogXtHH7FNVoo", // 30. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwoqYoA9P8gf3r", // 31. 0x9a8ebed3 + "sp6JS7f14BuwFY8MwoujwMJofGnsA", // 32. 0x9a8ebed3 }; // Create accounts for all of the seeds and fund those accounts. @@ -396,15 +678,25 @@ class NFTokenDir_test : public beast::unit_test::suite BEAST_EXPECT(expectLowBits == (nftID & nft::pageMask)); } - // buyer accepts all of the offers. + // Remove one NFT and offer from the vectors. This offer is the one + // that will overflow the page. + nftIDs.pop_back(); + uint256 const offerForPageOverflow = offers.back(); + offers.pop_back(); + + // buyer accepts all of the offers but one. for (uint256 const& offer : offers) { env(token::acceptSellOffer(buyer, offer)); env.close(); } - // Verify that all NFTs are owned by buyer and findable in the - // ledger by having buyer create sell offers for all of their NFTs. + // buyer accepts the last offer which causes a page overflow. + env(token::acceptSellOffer(buyer, offerForPageOverflow), + ter(tecNO_SUITABLE_NFTOKEN_PAGE)); + + // Verify that all expected NFTs are owned by buyer and findable in + // the ledger by having buyer create sell offers for all of their NFTs. // Attempting to sell an offer that the ledger can't find generates // a non-tesSUCCESS error code. for (uint256 const& nftID : nftIDs) @@ -444,13 +736,332 @@ class NFTokenDir_test : public beast::unit_test::suite // All NFTs should now be accounted for, so nftIDs should be empty. BEAST_EXPECT(nftIDs.empty()); + + // Show that Without fixNFTokenDirV1 no more NFTs can be added to + // buyer. Also show that fixNFTokenDirV1 fixes the problem. + TER const expect = features[fixNFTokenDirV1] + ? static_cast(tesSUCCESS) + : static_cast(tecNO_SUITABLE_NFTOKEN_PAGE); + env(token::mint(buyer, 0), txflags(tfTransferable), ter(expect)); + env.close(); + } + + void + testConsecutivePacking(FeatureBitset features) + { + // We'll make a worst case scenario for NFT packing: + // + // 1. 33 accounts with identical low-32 bits mint 7 consecutive NFTs. + // 2. The taxon is manipulated to always be stored as zero. + // 3. A single account buys all 7x32 of the 33 NFTs. + // + // All of the NFTs should be acquired by the buyer. + // + // Lastly, none of the remaining NFTs should be acquirable by the + // buyer. They would cause page overflow. + + // This test collapses in a heap if fixNFTokenDirV1 is not enabled. + // If it is enabled just return so we skip the test. + if (!features[fixNFTokenDirV1]) + return; + + testcase("NFToken consecutive packing"); + + using namespace test::jtx; + + Env env{*this, features}; + + // Eventually all of the NFTokens will be owned by buyer. + Account const buyer{"buyer"}; + env.fund(XRP(10000), buyer); + env.close(); + + // Here are 33 seeds that produce identical low 32-bits in their + // corresponding AccountIDs. + static std::initializer_list const seeds{ + "sp6JS7f14BuwFY8Mw56vZeiBuhePx", // 0. 0x115d0525 + "sp6JS7f14BuwFY8Mw5BodF9tGuTUe", // 1. 0x115d0525 + "sp6JS7f14BuwFY8Mw5EnhC1cg84J7", // 2. 0x115d0525 + "sp6JS7f14BuwFY8Mw5P913Cunr2BK", // 3. 
0x115d0525 + "sp6JS7f14BuwFY8Mw5Pru7eLo1XzT", // 4. 0x115d0525 + "sp6JS7f14BuwFY8Mw61SLUC8UX2m8", // 5. 0x115d0525 + "sp6JS7f14BuwFY8Mw6AsBF9TpeMpq", // 6. 0x115d0525 + "sp6JS7f14BuwFY8Mw84XqrBZkU2vE", // 7. 0x115d0525 + "sp6JS7f14BuwFY8Mw89oSU6dBk3KB", // 8. 0x115d0525 + "sp6JS7f14BuwFY8Mw89qUKCyDmyzj", // 9. 0x115d0525 + "sp6JS7f14BuwFY8Mw8GfqQ9VRZ8tm", // 10. 0x115d0525 + "sp6JS7f14BuwFY8Mw8LtW3VqrqMks", // 11. 0x115d0525 + "sp6JS7f14BuwFY8Mw8ZrAkJc2sHew", // 12. 0x115d0525 + "sp6JS7f14BuwFY8Mw8jpkYSNrD3ah", // 13. 0x115d0525 + "sp6JS7f14BuwFY8MwF2mshd786m3V", // 14. 0x115d0525 + "sp6JS7f14BuwFY8MwFHfXq9x5NbPY", // 15. 0x115d0525 + "sp6JS7f14BuwFY8MwFrjWq5LAB8NT", // 16. 0x115d0525 + "sp6JS7f14BuwFY8Mwj4asgSh6hQZd", // 17. 0x115d0525 + "sp6JS7f14BuwFY8Mwj7ipFfqBSRrE", // 18. 0x115d0525 + "sp6JS7f14BuwFY8MwjHqtcvGav8uW", // 19. 0x115d0525 + "sp6JS7f14BuwFY8MwjLp4sk5fmzki", // 20. 0x115d0525 + "sp6JS7f14BuwFY8MwjioHuYb3Ytkx", // 21. 0x115d0525 + "sp6JS7f14BuwFY8MwkRjHPXWi7fGN", // 22. 0x115d0525 + "sp6JS7f14BuwFY8MwkdVdPV3LjNN1", // 23. 0x115d0525 + "sp6JS7f14BuwFY8MwkxUtVY5AXZFk", // 24. 0x115d0525 + "sp6JS7f14BuwFY8Mwm4jQzdfTbY9F", // 25. 0x115d0525 + "sp6JS7f14BuwFY8MwmCucYAqNp4iF", // 26. 0x115d0525 + "sp6JS7f14BuwFY8Mwo2bgdFtxBzpF", // 27. 0x115d0525 + "sp6JS7f14BuwFY8MwoGwD7v4U6qBh", // 28. 0x115d0525 + "sp6JS7f14BuwFY8MwoUczqFADMoXi", // 29. 0x115d0525 + "sp6JS7f14BuwFY8MwoY1xZeGd3gAr", // 30. 0x115d0525 + "sp6JS7f14BuwFY8MwomVCbfkv4kYZ", // 31. 0x115d0525 + "sp6JS7f14BuwFY8MwoqbrPSr4z13F", // 32. 0x115d0525 + }; + + // Create accounts for all of the seeds and fund those accounts. + std::vector accounts; + accounts.reserve(seeds.size()); + for (std::string_view const& seed : seeds) + { + Account const& account = + accounts.emplace_back(Account::base58Seed, std::string(seed)); + env.fund(XRP(10000), account); + env.close(); + } + + // All of the accounts create seven consecutive NFTs and and offer + // those NFTs to buyer. + std::array, 7> nftIDsByPage; + for (auto& vec : nftIDsByPage) + vec.reserve(accounts.size()); + std::array, 7> offers; + for (auto& vec : offers) + vec.reserve(accounts.size()); + for (std::size_t i = 0; i < nftIDsByPage.size(); ++i) + { + for (Account const& account : accounts) + { + // Mint the NFT. Tweak the taxon so zero is always stored. + std::uint32_t taxon = + toUInt32(nft::cipheredTaxon(i, nft::toTaxon(0))); + + uint256 const& nftID = nftIDsByPage[i].emplace_back( + token::getNextID(env, account, taxon, tfTransferable)); + env(token::mint(account, taxon), txflags(tfTransferable)); + env.close(); + + // Create an offer to give the NFT to buyer for free. + offers[i].emplace_back( + keylet::nftoffer(account, env.seq(account)).key); + env(token::createOffer(account, nftID, XRP(0)), + token::destination(buyer), + txflags((tfSellNFToken))); + } + } + env.close(); + + // Verify that the low 96 bits of all generated NFTs of the same + // sequence is identical. + for (auto const& vec : nftIDsByPage) + { + uint256 const expectLowBits = vec.front() & nft::pageMask; + for (uint256 const& nftID : vec) + { + BEAST_EXPECT(expectLowBits == (nftID & nft::pageMask)); + } + } + + // Remove one NFT and offer from each of the vectors. These offers + // are the ones that will overflow the page. 
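            // A hedged reading of why these particular offers must fail: the
            // surrounding comments indicate that NFTs sharing the same low
            // 96 bits must stay together on one NFTokenPage, and a page
            // holds at most 32 entries before it splits, so the 33rd
            // equivalent token has no suitable page and acceptance returns
            // tecNO_SUITABLE_NFTOKEN_PAGE below.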
+        std::vector<uint256> overflowNFTs;
+        overflowNFTs.reserve(nftIDsByPage.size());
+        std::vector<uint256> overflowOffers;
+        overflowOffers.reserve(nftIDsByPage.size());
+
+        for (std::size_t i = 0; i < nftIDsByPage.size(); ++i)
+        {
+            overflowNFTs.push_back(nftIDsByPage[i].back());
+            nftIDsByPage[i].pop_back();
+            BEAST_EXPECT(nftIDsByPage[i].size() == seeds.size() - 1);
+
+            overflowOffers.push_back(offers[i].back());
+            offers[i].pop_back();
+            BEAST_EXPECT(offers[i].size() == seeds.size() - 1);
+        }
+
+        // buyer accepts all of the offers that won't cause an overflow.
+        // Fill the center and outsides first to exercise different boundary
+        // cases.
+        for (int i : std::initializer_list<int>{3, 6, 0, 1, 2, 5, 4})
+        {
+            for (uint256 const& offer : offers[i])
+            {
+                env(token::acceptSellOffer(buyer, offer));
+                env.close();
+            }
+        }
+
+        // buyer accepts the seven offers that would cause page overflows if
+        // the transaction succeeded.
+        for (uint256 const& offer : overflowOffers)
+        {
+            env(token::acceptSellOffer(buyer, offer),
+                ter(tecNO_SUITABLE_NFTOKEN_PAGE));
+            env.close();
+        }
+
+        // Verify that all expected NFTs are owned by buyer and findable in
+        // the ledger by having buyer create sell offers for all of their NFTs.
+        // Attempting to sell an offer that the ledger can't find generates
+        // a non-tesSUCCESS error code.
+        for (auto const& vec : nftIDsByPage)
+        {
+            for (uint256 const& nftID : vec)
+            {
+                env(token::createOffer(buyer, nftID, XRP(100)),
+                    txflags(tfSellNFToken));
+                env.close();
+            }
+        }
+
+        // See what the account_objects command does with "nft_offer".
+        {
+            Json::Value ownedNftOffers(Json::arrayValue);
+            std::string marker;
+            do
+            {
+                Json::Value buyerOffers = [&env, &buyer, &marker]() {
+                    Json::Value params;
+                    params[jss::account] = buyer.human();
+                    params[jss::type] = jss::nft_offer;
+
+                    if (!marker.empty())
+                        params[jss::marker] = marker;
+                    return env.rpc(
+                        "json", "account_objects", to_string(params));
+                }();
+
+                marker.clear();
+                if (buyerOffers.isMember(jss::result))
+                {
+                    Json::Value& result = buyerOffers[jss::result];
+
+                    if (result.isMember(jss::marker))
+                        marker = result[jss::marker].asString();
+
+                    if (result.isMember(jss::account_objects))
+                    {
+                        Json::Value& someOffers = result[jss::account_objects];
+                        for (std::size_t i = 0; i < someOffers.size(); ++i)
+                            ownedNftOffers.append(someOffers[i]);
+                    }
+                }
+            } while (!marker.empty());
+
+            // Verify there are as many offers as there are NFTs.
+            {
+                std::size_t totalOwnedNFTs = 0;
+                for (auto const& vec : nftIDsByPage)
+                    totalOwnedNFTs += vec.size();
+                BEAST_EXPECT(ownedNftOffers.size() == totalOwnedNFTs);
+            }
+
+            // Cancel all the offers.
+            {
+                std::vector<uint256> cancelOffers;
+                cancelOffers.reserve(ownedNftOffers.size());
+
+                for (auto const& offer : ownedNftOffers)
+                {
+                    if (offer.isMember(jss::index))
+                    {
+                        uint256 offerIndex;
+                        if (offerIndex.parseHex(offer[jss::index].asString()))
+                            cancelOffers.push_back(offerIndex);
+                    }
+                }
+                env(token::cancelOffer(buyer, cancelOffers));
+                env.close();
+            }
+
+            // account_objects should no longer return any "nft_offer"s.
+            Json::Value remainingOffers = [&env, &buyer]() {
+                Json::Value params;
+                params[jss::account] = buyer.human();
+                params[jss::type] = jss::nft_offer;
+
+                return env.rpc("json", "account_objects", to_string(params));
+            }();
+            BEAST_EXPECT(
+                remainingOffers.isMember(jss::result) &&
+                remainingOffers[jss::result].isMember(jss::account_objects) &&
+                remainingOffers[jss::result][jss::account_objects].size() == 0);
+        }
+
+        // Verify that the ledger reports all of the NFTs owned by buyer.
+        // Use the account_nfts rpc call to get the values.
+        Json::Value ownedNFTs(Json::arrayValue);
+        std::string marker;
+        do
+        {
+            Json::Value buyerNFTs = [&env, &buyer, &marker]() {
+                Json::Value params;
+                params[jss::account] = buyer.human();
+                params[jss::type] = "state";
+
+                if (!marker.empty())
+                    params[jss::marker] = marker;
+                return env.rpc("json", "account_nfts", to_string(params));
+            }();
+
+            marker.clear();
+            if (buyerNFTs.isMember(jss::result))
+            {
+                Json::Value& result = buyerNFTs[jss::result];
+
+                if (result.isMember(jss::marker))
+                    marker = result[jss::marker].asString();
+
+                if (result.isMember(jss::account_nfts))
+                {
+                    Json::Value& someNFTs = result[jss::account_nfts];
+                    for (std::size_t i = 0; i < someNFTs.size(); ++i)
+                        ownedNFTs.append(someNFTs[i]);
+                }
+            }
+        } while (!marker.empty());
+
+        // Copy all of the nftIDs into a set to make validation easier.
+        std::set<uint256> allNftIDs;
+        for (auto& vec : nftIDsByPage)
+            allNftIDs.insert(vec.begin(), vec.end());
+
+        BEAST_EXPECT(ownedNFTs.size() == allNftIDs.size());
+
+        for (Json::Value const& ownedNFT : ownedNFTs)
+        {
+            if (ownedNFT.isMember(sfNFTokenID.jsonName))
+            {
+                uint256 ownedID;
+                BEAST_EXPECT(ownedID.parseHex(
+                    ownedNFT[sfNFTokenID.jsonName].asString()));
+                auto const foundIter = allNftIDs.find(ownedID);
+
+                // Assuming we find the NFT, erase it so we know it's been
+                // found and can't be found again.
+                if (BEAST_EXPECT(foundIter != allNftIDs.end()))
+                    allNftIDs.erase(foundIter);
+            }
+        }
+
+        // All NFTs should now be accounted for, so allNftIDs should be empty.
+        BEAST_EXPECT(allNftIDs.empty());
+    }
+
     void
     testWithFeats(FeatureBitset features)
     {
+        testConsecutiveNFTs(features);
         testLopsidedSplits(features);
+        testFixNFTokenDirV1(features);
         testTooManyEquivalent(features);
+        testConsecutivePacking(features);
     }
 
 public:
@@ -458,11 +1069,525 @@ class NFTokenDir_test : public beast::unit_test::suite
     run() override
     {
         using namespace test::jtx;
-        auto const sa = supported_amendments();
-        testWithFeats(sa);
+        FeatureBitset const all{supported_amendments()};
+        FeatureBitset const fixNFTDir{fixNFTokenDirV1};
+
+        testWithFeats(all - fixNFTDir);
+        testWithFeats(all);
     }
 };
 
 BEAST_DEFINE_TESTSUITE_PRIO(NFTokenDir, tx, ripple, 1);
 
 }  // namespace ripple
+
+// Seed that produces an account with the low-32 bits == 0xFFFFFFFF in
+// case it is needed for future testing:
+//
+// sp6JS7f14BuwFY8MwFe95Vpi9Znjs
+//
+
+// Sets of related accounts.
+//
+// Identifying the seeds of accounts that generate account IDs with the
+// same low 32 bits takes a while.  However, several sets of accounts with
+// that relationship have been located.  In case these sets of accounts are
+// needed for future testing scenarios, they are recorded below.
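Why the low 32 bits of the AccountID matter here: an NFTokenID packs 16 bits of flags, a 16-bit transfer fee, the 160-bit issuer AccountID, a 32-bit (scrambled) taxon, and the 32-bit mint sequence, and NFTs are grouped onto NFTokenPages by the low 96 bits that nft::pageMask selects, i.e. the issuer's low 32 bits plus the stored taxon and sequence. The sketch below is illustrative only and is not part of the patch; the helper name sketchNFTokenID is made up, but the packing follows that layout and shows why issuers sharing their low 32 bits, a taxon tweaked to store zero, and equal mint sequences collide in those 96 bits.

#if 0
// Illustrative sketch (hypothetical helper, not used by the tests).
// Needs <array>, <cstdint>, and <cstring>.
static uint256
sketchNFTokenID(
    std::uint16_t flags,
    std::uint16_t transferFee,
    AccountID const& issuer,
    std::uint32_t storedTaxon,
    std::uint32_t mintSeq)
{
    // Pack big-endian: flags | fee | issuer | taxon | sequence.
    std::array<std::uint8_t, 32> b{};
    b[0] = flags >> 8;
    b[1] = flags & 0xFF;
    b[2] = transferFee >> 8;
    b[3] = transferFee & 0xFF;
    std::memcpy(b.data() + 4, issuer.data(), 20);
    for (int i = 0; i < 4; ++i)
    {
        b[24 + i] = (storedTaxon >> (24 - 8 * i)) & 0xFF;
        b[28 + i] = (mintSeq >> (24 - 8 * i)) & 0xFF;
    }
    // Bytes 20..31 are the low 96 bits: issuer low 32 + taxon + sequence.
    return uint256::fromVoid(b.data());
}
#endif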
+#if 0 +34 account seeds that produce account IDs with low 32-bits 0x399187e9: + sp6JS7f14BuwFY8Mw5EYu5z86hKDL + sp6JS7f14BuwFY8Mw5PUAMwc5ygd7 + sp6JS7f14BuwFY8Mw5R3xUBcLSeTs + sp6JS7f14BuwFY8Mw5W6oS5sdC3oF + sp6JS7f14BuwFY8Mw5pYc3D9iuLcw + sp6JS7f14BuwFY8Mw5pfGVnhcdp3b + sp6JS7f14BuwFY8Mw6jS6RdEqXqrN + sp6JS7f14BuwFY8Mw6krt6AKbvRXW + sp6JS7f14BuwFY8Mw6mnVBQq7cAN2 + sp6JS7f14BuwFY8Mw8ECJxPjmkufQ + sp6JS7f14BuwFY8Mw8asgzcceGWYm + sp6JS7f14BuwFY8MwF6J3FXnPCgL8 + sp6JS7f14BuwFY8MwFEud2w5czv5q + sp6JS7f14BuwFY8MwFNxKVqJnx8P5 + sp6JS7f14BuwFY8MwFnTCXg3eRidL + sp6JS7f14BuwFY8Mwj47hv1vrDge6 + sp6JS7f14BuwFY8Mwj6TYekeeyukh + sp6JS7f14BuwFY8MwjFjsRDerz7jb + sp6JS7f14BuwFY8Mwjrj9mHTLBrcX + sp6JS7f14BuwFY8MwkKcJi3zMzAea + sp6JS7f14BuwFY8MwkYTDdnYRm9z4 + sp6JS7f14BuwFY8Mwkq8ei4D8uPNd + sp6JS7f14BuwFY8Mwm2pFruxbnJRd + sp6JS7f14BuwFY8MwmJV2ZnAjpC2g + sp6JS7f14BuwFY8MwmTFMPHQHfVYF + sp6JS7f14BuwFY8MwmkG2jXEgqiud + sp6JS7f14BuwFY8Mwms3xEh5tMDTw + sp6JS7f14BuwFY8MwmtipW4D8giZ9 + sp6JS7f14BuwFY8MwoRQBZm4KUUeE + sp6JS7f14BuwFY8MwoVey94QpXcrc + sp6JS7f14BuwFY8MwoZiuUoUTo3VG + sp6JS7f14BuwFY8MwonFFDLT4bHAZ + sp6JS7f14BuwFY8MwooGphD4hefBQ + sp6JS7f14BuwFY8MwoxDp3dmX6q5N + +34 account seeds that produce account IDs with low 32-bits 0x473f2c9a: + sp6JS7f14BuwFY8Mw53ktgqmv5Bmz + sp6JS7f14BuwFY8Mw5KPb2Kz7APFX + sp6JS7f14BuwFY8Mw5Xx4A6HRTPEE + sp6JS7f14BuwFY8Mw5y6qZFNAo358 + sp6JS7f14BuwFY8Mw6kdaBg1QrZfn + sp6JS7f14BuwFY8Mw8QmTfLMAZ5K1 + sp6JS7f14BuwFY8Mw8cbRRVcCEELr + sp6JS7f14BuwFY8Mw8gQvJebmxvDG + sp6JS7f14BuwFY8Mw8qPQurwu3P7Y + sp6JS7f14BuwFY8MwFS4PEVKmuPy5 + sp6JS7f14BuwFY8MwFUQM1rAsQ8tS + sp6JS7f14BuwFY8MwjJBZCkuwsRnM + sp6JS7f14BuwFY8MwjTdS8vZhX5E9 + sp6JS7f14BuwFY8MwjhSmWCbNhd25 + sp6JS7f14BuwFY8MwjwkpqwZsDBw9 + sp6JS7f14BuwFY8MwjyET4p6eqd5J + sp6JS7f14BuwFY8MwkMNAe4JhnG7E + sp6JS7f14BuwFY8MwkRRpnT93UWWS + sp6JS7f14BuwFY8MwkY9CvB22RvUe + sp6JS7f14BuwFY8Mwkhw9VxXqmTr7 + sp6JS7f14BuwFY8MwkmgaTat7eFa7 + sp6JS7f14BuwFY8Mwkq5SxGGv1oLH + sp6JS7f14BuwFY8MwmCBM5p5bTg6y + sp6JS7f14BuwFY8MwmmmXaVah64dB + sp6JS7f14BuwFY8Mwo7R7Cn614v9V + sp6JS7f14BuwFY8MwoCAG1na7GR2M + sp6JS7f14BuwFY8MwoDuPvJS4gG7C + sp6JS7f14BuwFY8MwoMMowSyPQLfy + sp6JS7f14BuwFY8MwoRqDiwTNsTBm + sp6JS7f14BuwFY8MwoWbBWtjpB7pg + sp6JS7f14BuwFY8Mwoi1AEeELGecF + sp6JS7f14BuwFY8MwopGP6Lo5byuj + sp6JS7f14BuwFY8MwoufkXGHp2VW8 + sp6JS7f14BuwFY8MwowGeagFQY32k + +34 account seeds that produce account IDs with low 32-bits 0x4d59f0d1: + sp6JS7f14BuwFY8Mw5CsNgH64zxK7 + sp6JS7f14BuwFY8Mw5Dg4wi2E344h + sp6JS7f14BuwFY8Mw5ErV949Zh2PX + sp6JS7f14BuwFY8Mw5p4nsQvEUE1s + sp6JS7f14BuwFY8Mw8LGnkbaP68Gn + sp6JS7f14BuwFY8Mw8aq6RCBc3iHo + sp6JS7f14BuwFY8Mw8bkWaGoKYT6e + sp6JS7f14BuwFY8Mw8qrCuXnzAXVj + sp6JS7f14BuwFY8MwFDKcPAHPHJTm + sp6JS7f14BuwFY8MwFUXJs4unfgNu + sp6JS7f14BuwFY8MwFj9Yv5LjshD9 + sp6JS7f14BuwFY8Mwj3H73nmq5UaC + sp6JS7f14BuwFY8MwjHSYShis1Yhk + sp6JS7f14BuwFY8MwjpfE1HVo8UP1 + sp6JS7f14BuwFY8Mwk6JE1SXUuiNc + sp6JS7f14BuwFY8MwkASgxEjEnFmU + sp6JS7f14BuwFY8MwkGNY8kg7R6RK + sp6JS7f14BuwFY8MwkHinNZ8SYBQu + sp6JS7f14BuwFY8MwkXLCW1hbhGya + sp6JS7f14BuwFY8MwkZ7mWrYK9YtU + sp6JS7f14BuwFY8MwkdFSqNB5DbKL + sp6JS7f14BuwFY8Mwm3jdBaCAx8H6 + sp6JS7f14BuwFY8Mwm3rk5hEwDRtY + sp6JS7f14BuwFY8Mwm77a2ULuwxu4 + sp6JS7f14BuwFY8MwmJpY7braKLaN + sp6JS7f14BuwFY8MwmKHQjG4XiZ6g + sp6JS7f14BuwFY8Mwmmv8Y3wyUDzs + sp6JS7f14BuwFY8MwmucFe1WgqtwG + sp6JS7f14BuwFY8Mwo1EjdU1bznZR + sp6JS7f14BuwFY8MwoJiqankkU5uR + sp6JS7f14BuwFY8MwoLnvQ6zdqbKw + sp6JS7f14BuwFY8MwoUGeJ319eu48 + sp6JS7f14BuwFY8MwoYf135tQjHP4 + sp6JS7f14BuwFY8MwogeF6M6SAyid + +34 account seeds that produce account IDs with low 32-bits 
0xabb11898: + sp6JS7f14BuwFY8Mw5DgiYaNVSb1G + sp6JS7f14BuwFY8Mw5k6e94TMvuox + sp6JS7f14BuwFY8Mw5tTSN7KzYxiT + sp6JS7f14BuwFY8Mw61XV6m33utif + sp6JS7f14BuwFY8Mw87jKfrjiENCb + sp6JS7f14BuwFY8Mw8AFtxxFiRtJG + sp6JS7f14BuwFY8Mw8cosAVExzbeE + sp6JS7f14BuwFY8Mw8fmkQ63zE8WQ + sp6JS7f14BuwFY8Mw8iYSsxNbDN6D + sp6JS7f14BuwFY8Mw8wTZdGRJyyM1 + sp6JS7f14BuwFY8Mw8z7xEh3qBGr7 + sp6JS7f14BuwFY8MwFL5gpKQWZj7g + sp6JS7f14BuwFY8MwFPeZchXQnRZ5 + sp6JS7f14BuwFY8MwFSPxWSJVoU29 + sp6JS7f14BuwFY8MwFYyVkqX8kvRm + sp6JS7f14BuwFY8MwFcbVikUEwJvk + sp6JS7f14BuwFY8MwjF7NcZk1NctK + sp6JS7f14BuwFY8MwjJCwYr9zSfAv + sp6JS7f14BuwFY8MwjYa5yLkgCLuT + sp6JS7f14BuwFY8MwjenxuJ3TH2Bc + sp6JS7f14BuwFY8MwjriN7Ui11NzB + sp6JS7f14BuwFY8Mwk3AuoJNSEo34 + sp6JS7f14BuwFY8MwkT36hnRv8hTo + sp6JS7f14BuwFY8MwkTQixEXfi1Cr + sp6JS7f14BuwFY8MwkYJaZM1yTJBF + sp6JS7f14BuwFY8Mwkc4k1uo85qp2 + sp6JS7f14BuwFY8Mwkf7cFhF1uuxx + sp6JS7f14BuwFY8MwmCK2un99wb4e + sp6JS7f14BuwFY8MwmETztNHYu2Bx + sp6JS7f14BuwFY8MwmJws9UwRASfR + sp6JS7f14BuwFY8MwoH5PQkGK8tEb + sp6JS7f14BuwFY8MwoVXtP2yCzjJV + sp6JS7f14BuwFY8MwobxRXA9vsTeX + sp6JS7f14BuwFY8Mwos3pc5Gb3ihU + +34 account seeds that produce account IDs with low 32-bits 0xce627322: + sp6JS7f14BuwFY8Mw5Ck6i83pGNh3 + sp6JS7f14BuwFY8Mw5FKuwTxjAdH1 + sp6JS7f14BuwFY8Mw5FVKkEn6TkLH + sp6JS7f14BuwFY8Mw5NbQwLwHDd5v + sp6JS7f14BuwFY8Mw5X1dbz3msZaZ + sp6JS7f14BuwFY8Mw6qv6qaXNeP74 + sp6JS7f14BuwFY8Mw81SXagUeutCw + sp6JS7f14BuwFY8Mw84Ph7Qa8kwwk + sp6JS7f14BuwFY8Mw8Hp4gFyU3Qko + sp6JS7f14BuwFY8Mw8Kt8bAKredSx + sp6JS7f14BuwFY8Mw8XHK3VKRQ7v7 + sp6JS7f14BuwFY8Mw8eGyWxZGHY6v + sp6JS7f14BuwFY8Mw8iU5CLyHVcD2 + sp6JS7f14BuwFY8Mw8u3Zr26Ar914 + sp6JS7f14BuwFY8MwF2Kcdxtjzjv8 + sp6JS7f14BuwFY8MwFLmPWb6rbxNg + sp6JS7f14BuwFY8MwFUu8s7UVuxuJ + sp6JS7f14BuwFY8MwFYBaatwHxAJ8 + sp6JS7f14BuwFY8Mwjg6hFkeHwoqG + sp6JS7f14BuwFY8MwjjycJojy2ufk + sp6JS7f14BuwFY8MwkEWoxcSKGPXv + sp6JS7f14BuwFY8MwkMe7wLkEUsQT + sp6JS7f14BuwFY8MwkvyKLaPUc4FS + sp6JS7f14BuwFY8Mwm8doqXPKZmVQ + sp6JS7f14BuwFY8Mwm9r3No8yQ8Tx + sp6JS7f14BuwFY8Mwm9w6dks68W9B + sp6JS7f14BuwFY8MwmMPrv9sCdbpS + sp6JS7f14BuwFY8MwmPAvs3fcQNja + sp6JS7f14BuwFY8MwmS5jasapfcnJ + sp6JS7f14BuwFY8MwmU2L3qJEhnuA + sp6JS7f14BuwFY8MwoAQYmiBnW7fM + sp6JS7f14BuwFY8MwoBkkkXrPmkKF + sp6JS7f14BuwFY8MwonfmxPo6tkvC + sp6JS7f14BuwFY8MwouZFwhiNcYq6 + +34 account seeds that produce account IDs with low 32-bits 0xe29643e8: + sp6JS7f14BuwFY8Mw5EfAavcXAh2k + sp6JS7f14BuwFY8Mw5LhFjLkFSCVF + sp6JS7f14BuwFY8Mw5bRfEv5HgdBh + sp6JS7f14BuwFY8Mw5d6sPcKzypKN + sp6JS7f14BuwFY8Mw5rcqDtk1fACP + sp6JS7f14BuwFY8Mw5xkxRq1Notzv + sp6JS7f14BuwFY8Mw66fbkdw5WYmt + sp6JS7f14BuwFY8Mw6diEG8sZ7Fx7 + sp6JS7f14BuwFY8Mw6v2r1QhG7xc1 + sp6JS7f14BuwFY8Mw6zP6DHCTx2Fd + sp6JS7f14BuwFY8Mw8B3n39JKuFkk + sp6JS7f14BuwFY8Mw8FmBvqYw7uqn + sp6JS7f14BuwFY8Mw8KEaftb1eRwu + sp6JS7f14BuwFY8Mw8WJ1qKkegj9N + sp6JS7f14BuwFY8Mw8r8cAZEkq2BS + sp6JS7f14BuwFY8MwFKPxxwF65gZh + sp6JS7f14BuwFY8MwFKhaF8APcN5H + sp6JS7f14BuwFY8MwFN2buJn4BgYC + sp6JS7f14BuwFY8MwFUTe175MjP3x + sp6JS7f14BuwFY8MwFZhmRDb53NNb + sp6JS7f14BuwFY8MwFa2Azn5nU2WS + sp6JS7f14BuwFY8MwjNNt91hwgkn7 + sp6JS7f14BuwFY8MwjdiYt6ChACe7 + sp6JS7f14BuwFY8Mwk5qFVQ48Mmr9 + sp6JS7f14BuwFY8MwkGvCj7pNf1zG + sp6JS7f14BuwFY8MwkY9UcN2D2Fzs + sp6JS7f14BuwFY8MwkpGvSk9G9RyT + sp6JS7f14BuwFY8MwmGQ7nJf1eEzV + sp6JS7f14BuwFY8MwmQLjGsYdyAmV + sp6JS7f14BuwFY8MwmZ8usztKvikT + sp6JS7f14BuwFY8MwobyMLC2hQdFR + sp6JS7f14BuwFY8MwoiRtwUecZeJ5 + sp6JS7f14BuwFY8MwojHjKsUzj1KJ + sp6JS7f14BuwFY8Mwop29anGAjidU + +33 account seeds that produce account IDs with low 32-bits 0x115d0525: + sp6JS7f14BuwFY8Mw56vZeiBuhePx + 
sp6JS7f14BuwFY8Mw5BodF9tGuTUe + sp6JS7f14BuwFY8Mw5EnhC1cg84J7 + sp6JS7f14BuwFY8Mw5P913Cunr2BK + sp6JS7f14BuwFY8Mw5Pru7eLo1XzT + sp6JS7f14BuwFY8Mw61SLUC8UX2m8 + sp6JS7f14BuwFY8Mw6AsBF9TpeMpq + sp6JS7f14BuwFY8Mw84XqrBZkU2vE + sp6JS7f14BuwFY8Mw89oSU6dBk3KB + sp6JS7f14BuwFY8Mw89qUKCyDmyzj + sp6JS7f14BuwFY8Mw8GfqQ9VRZ8tm + sp6JS7f14BuwFY8Mw8LtW3VqrqMks + sp6JS7f14BuwFY8Mw8ZrAkJc2sHew + sp6JS7f14BuwFY8Mw8jpkYSNrD3ah + sp6JS7f14BuwFY8MwF2mshd786m3V + sp6JS7f14BuwFY8MwFHfXq9x5NbPY + sp6JS7f14BuwFY8MwFrjWq5LAB8NT + sp6JS7f14BuwFY8Mwj4asgSh6hQZd + sp6JS7f14BuwFY8Mwj7ipFfqBSRrE + sp6JS7f14BuwFY8MwjHqtcvGav8uW + sp6JS7f14BuwFY8MwjLp4sk5fmzki + sp6JS7f14BuwFY8MwjioHuYb3Ytkx + sp6JS7f14BuwFY8MwkRjHPXWi7fGN + sp6JS7f14BuwFY8MwkdVdPV3LjNN1 + sp6JS7f14BuwFY8MwkxUtVY5AXZFk + sp6JS7f14BuwFY8Mwm4jQzdfTbY9F + sp6JS7f14BuwFY8MwmCucYAqNp4iF + sp6JS7f14BuwFY8Mwo2bgdFtxBzpF + sp6JS7f14BuwFY8MwoGwD7v4U6qBh + sp6JS7f14BuwFY8MwoUczqFADMoXi + sp6JS7f14BuwFY8MwoY1xZeGd3gAr + sp6JS7f14BuwFY8MwomVCbfkv4kYZ + sp6JS7f14BuwFY8MwoqbrPSr4z13F + +33 account seeds that produce account IDs with low 32-bits 0x304033aa: + sp6JS7f14BuwFY8Mw5DaUP9agF5e1 + sp6JS7f14BuwFY8Mw5ohbtmPN4yGN + sp6JS7f14BuwFY8Mw5rRsA5fcoTAQ + sp6JS7f14BuwFY8Mw6zpYHMY3m6KT + sp6JS7f14BuwFY8Mw86BzQq4sTnoW + sp6JS7f14BuwFY8Mw8CCpnfvmGdV7 + sp6JS7f14BuwFY8Mw8DRjUDaBcFco + sp6JS7f14BuwFY8Mw8cL7GPo3zZN7 + sp6JS7f14BuwFY8Mw8y6aeYVtH6qt + sp6JS7f14BuwFY8MwFZR3PtVTCdUH + sp6JS7f14BuwFY8MwFcdcdbgz7m3s + sp6JS7f14BuwFY8MwjdnJDiUxEBRR + sp6JS7f14BuwFY8MwjhxWgSntqrFe + sp6JS7f14BuwFY8MwjrSHEhZ8CUM1 + sp6JS7f14BuwFY8MwjzkEeSTc9ZYf + sp6JS7f14BuwFY8MwkBZSk9JhaeCB + sp6JS7f14BuwFY8MwkGfwNY4i2iiU + sp6JS7f14BuwFY8MwknjtZd2oU2Ff + sp6JS7f14BuwFY8Mwkszsqd3ok9NE + sp6JS7f14BuwFY8Mwm58A81MAMvgZ + sp6JS7f14BuwFY8MwmiPTWysuDJCH + sp6JS7f14BuwFY8MwmxhiNeLfD76r + sp6JS7f14BuwFY8Mwo7SPdkwpGrFH + sp6JS7f14BuwFY8MwoANq4F1Sj3qH + sp6JS7f14BuwFY8MwoVjcHufAkd6L + sp6JS7f14BuwFY8MwoVxHBXdaxzhm + sp6JS7f14BuwFY8MwoZ2oTjBNfLpm + sp6JS7f14BuwFY8Mwoc9swzyotFVD + sp6JS7f14BuwFY8MwogMqVRwVEcQ9 + sp6JS7f14BuwFY8MwohMm7WxwnFqH + sp6JS7f14BuwFY8MwopUcpZHuF8BH + sp6JS7f14BuwFY8Mwor6rW6SS7tiB + sp6JS7f14BuwFY8MwoxyaqYz4Ngsb + +33 account seeds that produce account IDs with low 32-bits 0x42d4e09c: + sp6JS7f14BuwFY8Mw58NSZH9EaUxQ + sp6JS7f14BuwFY8Mw5JByk1pgPpL7 + sp6JS7f14BuwFY8Mw5YrJJuXnkHVB + sp6JS7f14BuwFY8Mw5kZe2ZzNSnKR + sp6JS7f14BuwFY8Mw6eXHTsbwi1U7 + sp6JS7f14BuwFY8Mw6gqN7HHDDKSh + sp6JS7f14BuwFY8Mw6zw8L1sSSR53 + sp6JS7f14BuwFY8Mw8E4WqSKKbksy + sp6JS7f14BuwFY8MwF3V9gemqJtND + sp6JS7f14BuwFY8Mwj4j46LHWZuY6 + sp6JS7f14BuwFY8MwjF5i8vh4Ezjy + sp6JS7f14BuwFY8MwjJZpEKgMpUAt + sp6JS7f14BuwFY8MwjWL7LfnzNUuh + sp6JS7f14BuwFY8Mwk7Y1csGuqAhX + sp6JS7f14BuwFY8MwkB1HVH17hN5W + sp6JS7f14BuwFY8MwkBntH7BZZupu + sp6JS7f14BuwFY8MwkEy4rMbNHG9P + sp6JS7f14BuwFY8MwkKz4LYesZeiN + sp6JS7f14BuwFY8MwkUrXyo9gMDPM + sp6JS7f14BuwFY8MwkV2hySsxej1G + sp6JS7f14BuwFY8MwkozhTVN12F9C + sp6JS7f14BuwFY8MwkpkzGB3sFJw5 + sp6JS7f14BuwFY8Mwks3zDZLGrhdn + sp6JS7f14BuwFY8MwktG1KCS7L2wW + sp6JS7f14BuwFY8Mwm1jVFsafwcYx + sp6JS7f14BuwFY8Mwm8hmrU6g5Wd6 + sp6JS7f14BuwFY8MwmFvstfRF7e2f + sp6JS7f14BuwFY8MwmeRohi6m5fs8 + sp6JS7f14BuwFY8MwmmU96RHUaRZL + sp6JS7f14BuwFY8MwoDFzteYqaUh4 + sp6JS7f14BuwFY8MwoPkTf5tDykPF + sp6JS7f14BuwFY8MwoSbMaDtiMoDN + sp6JS7f14BuwFY8MwoVL1vY1CysjR + +33 account seeds that produce account IDs with low 32-bits 0x9a8ebed3: + sp6JS7f14BuwFY8Mw5FnqmbciPvH6 + sp6JS7f14BuwFY8Mw5MBGbyMSsXLp + sp6JS7f14BuwFY8Mw5S4PnDyBdKKm + sp6JS7f14BuwFY8Mw6kcXpM2enE35 + sp6JS7f14BuwFY8Mw6tuuSMMwyJ44 + sp6JS7f14BuwFY8Mw8E8JWLQ1P8pt + 
sp6JS7f14BuwFY8Mw8WwdgWkCHhEx + sp6JS7f14BuwFY8Mw8XDUYvU6oGhQ + sp6JS7f14BuwFY8Mw8ceVGL4M1zLQ + sp6JS7f14BuwFY8Mw8fdSwLCZWDFd + sp6JS7f14BuwFY8Mw8zuF6Fg65i1E + sp6JS7f14BuwFY8MwF2k7bihVfqes + sp6JS7f14BuwFY8MwF6X24WXGn557 + sp6JS7f14BuwFY8MwFMpn7strjekg + sp6JS7f14BuwFY8MwFSdy9sYVrwJs + sp6JS7f14BuwFY8MwFdMcLy9UkrXn + sp6JS7f14BuwFY8MwFdbwFm1AAboa + sp6JS7f14BuwFY8MwFdr5AhKThVtU + sp6JS7f14BuwFY8MwjFc3Q9YatvAw + sp6JS7f14BuwFY8MwjRXcNs1ozEXn + sp6JS7f14BuwFY8MwkQGUKL7v1FBt + sp6JS7f14BuwFY8Mwkamsoxx1wECt + sp6JS7f14BuwFY8Mwm3hus1dG6U8y + sp6JS7f14BuwFY8Mwm589M8vMRpXF + sp6JS7f14BuwFY8MwmJTRJ4Fqz1A3 + sp6JS7f14BuwFY8MwmRfy8fer4QbL + sp6JS7f14BuwFY8MwmkkFx1HtgWRx + sp6JS7f14BuwFY8MwmwP9JFdKa4PS + sp6JS7f14BuwFY8MwoXWJLB3ciHfo + sp6JS7f14BuwFY8MwoYc1gTtT2mWL + sp6JS7f14BuwFY8MwogXtHH7FNVoo + sp6JS7f14BuwFY8MwoqYoA9P8gf3r + sp6JS7f14BuwFY8MwoujwMJofGnsA + +33 account seeds that produce account IDs with low 32-bits 0xa1dcea4a: + sp6JS7f14BuwFY8Mw5Ccov2N36QTy + sp6JS7f14BuwFY8Mw5CuSemVb5p7w + sp6JS7f14BuwFY8Mw5Ep8wpsTfpSz + sp6JS7f14BuwFY8Mw5WtutJc2H45M + sp6JS7f14BuwFY8Mw6vsDeaSKeUJZ + sp6JS7f14BuwFY8Mw83t5BPWUAzzF + sp6JS7f14BuwFY8Mw8FYGnK35mgkV + sp6JS7f14BuwFY8Mw8huo1x5pfKKJ + sp6JS7f14BuwFY8Mw8mPStxfMDrZa + sp6JS7f14BuwFY8Mw8yC3A7aQJytK + sp6JS7f14BuwFY8MwFCWCDmo9o3t8 + sp6JS7f14BuwFY8MwFjapa4gKxPhR + sp6JS7f14BuwFY8Mwj8CWtG29uw71 + sp6JS7f14BuwFY8MwjHyU5KpEMLVT + sp6JS7f14BuwFY8MwjMZSN7LZuWD8 + sp6JS7f14BuwFY8Mwja2TXJNBhKHU + sp6JS7f14BuwFY8Mwjf3xNTopHKTF + sp6JS7f14BuwFY8Mwjn5RAhedPeuM + sp6JS7f14BuwFY8MwkJdr4d6QoE8K + sp6JS7f14BuwFY8MwkmBryo3SUoLm + sp6JS7f14BuwFY8MwkrPdsc4tR8yw + sp6JS7f14BuwFY8Mwkttjcw2a65Fi + sp6JS7f14BuwFY8Mwm19n3rSaNx5S + sp6JS7f14BuwFY8Mwm3ryr4Xp2aQX + sp6JS7f14BuwFY8MwmBnDmgnJLB6B + sp6JS7f14BuwFY8MwmHgPjzrYjthq + sp6JS7f14BuwFY8MwmeV55DAnWKdd + sp6JS7f14BuwFY8Mwo49hK6BGrauT + sp6JS7f14BuwFY8Mwo56vfKY9aoWu + sp6JS7f14BuwFY8MwoU7tTTXLQTrh + sp6JS7f14BuwFY8MwoXpogSF2KaZB + sp6JS7f14BuwFY8MwoY9JYQAR16pc + sp6JS7f14BuwFY8MwoozLzKNAEXKM + +33 account seeds that produce account IDs with low 32-bits 0xbd2116db: + sp6JS7f14BuwFY8Mw5GrpkmPuA3Bw + sp6JS7f14BuwFY8Mw5r1sLoQJZDc6 + sp6JS7f14BuwFY8Mw68zzRmezLdd6 + sp6JS7f14BuwFY8Mw6jDSyaiF1mRp + sp6JS7f14BuwFY8Mw813wU9u5D6Uh + sp6JS7f14BuwFY8Mw8BBvpf2JFGoJ + sp6JS7f14BuwFY8Mw8F7zXxAiT263 + sp6JS7f14BuwFY8Mw8XG7WuVGHP2N + sp6JS7f14BuwFY8Mw8eyWrcz91cz6 + sp6JS7f14BuwFY8Mw8yNVKFVYyk9u + sp6JS7f14BuwFY8MwF2oA6ePqvZWP + sp6JS7f14BuwFY8MwF9VkcSNh3keq + sp6JS7f14BuwFY8MwFYsMWajgEf2j + sp6JS7f14BuwFY8Mwj3Gu43jYoJ4n + sp6JS7f14BuwFY8MwjJ5iRmYDHrW4 + sp6JS7f14BuwFY8MwjaUSSga93CiM + sp6JS7f14BuwFY8MwjxgLh2FY4Lvt + sp6JS7f14BuwFY8Mwk9hQdNZUgmTB + sp6JS7f14BuwFY8MwkcMXqtFp1sMx + sp6JS7f14BuwFY8MwkzZCDc56jsUB + sp6JS7f14BuwFY8Mwm5Zz7fP24Qym + sp6JS7f14BuwFY8MwmDWqizXSoJRG + sp6JS7f14BuwFY8MwmKHmkNYdMqqi + sp6JS7f14BuwFY8MwmRfAWHxWpGNK + sp6JS7f14BuwFY8MwmjCdXwyhphZ1 + sp6JS7f14BuwFY8MwmmukDAm1w6FL + sp6JS7f14BuwFY8Mwmmz2SzaR9TRH + sp6JS7f14BuwFY8Mwmz2z5mKHXzfn + sp6JS7f14BuwFY8Mwo2xNe5629r5k + sp6JS7f14BuwFY8MwoKy8tZxZrfJw + sp6JS7f14BuwFY8MwoLyQ9aMsq8Dm + sp6JS7f14BuwFY8MwoqqYkewuyZck + sp6JS7f14BuwFY8MwouvvhREVp6Pp + +33 account seeds that produce account IDs with low 32-bits 0xd80df065: + sp6JS7f14BuwFY8Mw5B7ERyhAfgHA + sp6JS7f14BuwFY8Mw5VuW3cF7bm2v + sp6JS7f14BuwFY8Mw5py3t1j7YbFT + sp6JS7f14BuwFY8Mw5qc84SzB6RHr + sp6JS7f14BuwFY8Mw5vGHW1G1hAy8 + sp6JS7f14BuwFY8Mw6gVa8TYukws6 + sp6JS7f14BuwFY8Mw8K9w1RoUAv1w + sp6JS7f14BuwFY8Mw8KvKtB7787CA + sp6JS7f14BuwFY8Mw8Y7WhRbuFzRq + sp6JS7f14BuwFY8Mw8cipw7inRmMn + sp6JS7f14BuwFY8MwFM5fAUNLNB13 + 
sp6JS7f14BuwFY8MwFSe1zAsht3X3 + sp6JS7f14BuwFY8MwFYNdigqQuHZM + sp6JS7f14BuwFY8MwjWkejj7V4V5Q + sp6JS7f14BuwFY8Mwjd2JGpsjvynq + sp6JS7f14BuwFY8Mwjg1xkducn751 + sp6JS7f14BuwFY8Mwjsp6LnaJvL1W + sp6JS7f14BuwFY8MwjvSbLc9593yH + sp6JS7f14BuwFY8Mwjw2h5wx7U6vZ + sp6JS7f14BuwFY8MwjxKUjtRsmPLH + sp6JS7f14BuwFY8Mwk1Yy8ginDfqv + sp6JS7f14BuwFY8Mwk2HrWhWwZP12 + sp6JS7f14BuwFY8Mwk4SsqiexvpWs + sp6JS7f14BuwFY8Mwk66zCs5ACpE6 + sp6JS7f14BuwFY8MwkCwx6vY97Nwh + sp6JS7f14BuwFY8MwknrbjnhTTWU8 + sp6JS7f14BuwFY8MwkokDy2ShRzQx + sp6JS7f14BuwFY8Mwm3BxnRPNxsuu + sp6JS7f14BuwFY8MwmY9EWdQQsFVr + sp6JS7f14BuwFY8MwmYTWjrDhmk8S + sp6JS7f14BuwFY8Mwo9skXt9Y5BVS + sp6JS7f14BuwFY8MwoZYKZybJ1Crp + sp6JS7f14BuwFY8MwoyXqkhySfSmF + +33 account seeds that produce account IDs with low 32-bits 0xe2e44294: + sp6JS7f14BuwFY8Mw53dmvTgNtBwi + sp6JS7f14BuwFY8Mw5Wrxsqn6WrXW + sp6JS7f14BuwFY8Mw5fGDT31RCXgC + sp6JS7f14BuwFY8Mw5nKRkubwrLWM + sp6JS7f14BuwFY8Mw5nXMajwKjriB + sp6JS7f14BuwFY8Mw5xZybggrC9NG + sp6JS7f14BuwFY8Mw5xea8f6dBMV5 + sp6JS7f14BuwFY8Mw5zDGofAHy5Lb + sp6JS7f14BuwFY8Mw6eado41rQNVG + sp6JS7f14BuwFY8Mw6yqKXQsQJPuU + sp6JS7f14BuwFY8Mw83MSN4FDzSGH + sp6JS7f14BuwFY8Mw8B3pUbzQqHe2 + sp6JS7f14BuwFY8Mw8WwRLnhBRvfk + sp6JS7f14BuwFY8Mw8hDBpKbpJwJX + sp6JS7f14BuwFY8Mw8jggRSZACe7M + sp6JS7f14BuwFY8Mw8mJRpU3qWbwC + sp6JS7f14BuwFY8MwFDnVozykN21u + sp6JS7f14BuwFY8MwFGGRGY9fctgv + sp6JS7f14BuwFY8MwjKznfChH9DQb + sp6JS7f14BuwFY8MwjbC5GvngRCk6 + sp6JS7f14BuwFY8Mwk3Lb7FPe1629 + sp6JS7f14BuwFY8MwkCeS41BwVrBD + sp6JS7f14BuwFY8MwkDnnvRyuWJ7d + sp6JS7f14BuwFY8MwkbkRNnzDEFpf + sp6JS7f14BuwFY8MwkiNhaVhGNk6v + sp6JS7f14BuwFY8Mwm1X4UJXRZx3p + sp6JS7f14BuwFY8Mwm7da9q5vfq7J + sp6JS7f14BuwFY8MwmPLqfBPrHw5H + sp6JS7f14BuwFY8MwmbJpxvVjEwm2 + sp6JS7f14BuwFY8MwoAVeA7ka37cD + sp6JS7f14BuwFY8MwoTFFTAwFKmVM + sp6JS7f14BuwFY8MwoYsne51VpDE3 + sp6JS7f14BuwFY8MwohLVnU1VTk5h + +#endif // 0 diff --git a/src/test/app/NFToken_test.cpp b/src/test/app/NFToken_test.cpp index 40dfe2fe35c..7dd4a781286 100644 --- a/src/test/app/NFToken_test.cpp +++ b/src/test/app/NFToken_test.cpp @@ -482,15 +482,14 @@ class NFToken_test : public beast::unit_test::suite if (replacement->getFieldU32(sfMintedNFTokens) != 1) return false; // Unexpected test conditions. - // Now replace the sfMintedNFTokens with its maximum value. - (*replacement)[sfMintedNFTokens] = - std::numeric_limits::max(); + // Now replace sfMintedNFTokens with the largest valid value. + (*replacement)[sfMintedNFTokens] = 0xFFFF'FFFE; view.rawReplace(replacement); return true; }); - // alice should not be able to mint any tokens because she has already - // minted the maximum allowed by a single account. + // See whether alice is at the boundary that causes an error. + env(token::mint(alice, 0u), ter(tesSUCCESS)); env(token::mint(alice, 0u), ter(tecMAX_SEQUENCE_REACHED)); } @@ -4069,6 +4068,87 @@ class NFToken_test : public beast::unit_test::suite } } + void + testNFTokenOfferOwner(FeatureBitset features) + { + // Verify the Owner field of an offer behaves as expected. + testcase("NFToken offer owner"); + + using namespace test::jtx; + + Env env{*this, features}; + + Account const issuer{"issuer"}; + Account const buyer1{"buyer1"}; + Account const buyer2{"buyer2"}; + env.fund(XRP(10000), issuer, buyer1, buyer2); + env.close(); + + // issuer creates an NFT. + uint256 const nftId{token::getNextID(env, issuer, 0u, tfTransferable)}; + env(token::mint(issuer, 0u), txflags(tfTransferable)); + env.close(); + + // Prove that issuer now owns nftId. 
+ BEAST_EXPECT(nftCount(env, issuer) == 1); + BEAST_EXPECT(nftCount(env, buyer1) == 0); + BEAST_EXPECT(nftCount(env, buyer2) == 0); + + // Both buyer1 and buyer2 create buy offers for nftId. + uint256 const buyer1OfferIndex = + keylet::nftoffer(buyer1, env.seq(buyer1)).key; + env(token::createOffer(buyer1, nftId, XRP(100)), token::owner(issuer)); + uint256 const buyer2OfferIndex = + keylet::nftoffer(buyer2, env.seq(buyer2)).key; + env(token::createOffer(buyer2, nftId, XRP(100)), token::owner(issuer)); + env.close(); + + // Lambda that counts the number of buy offers for a given NFT. + auto nftBuyOfferCount = [&env](uint256 const& nftId) -> std::size_t { + // We know that in this case not very many offers will be + // returned, so we skip the marker stuff. + Json::Value params; + params[jss::nft_id] = to_string(nftId); + Json::Value buyOffers = + env.rpc("json", "nft_buy_offers", to_string(params)); + + if (buyOffers.isMember(jss::result) && + buyOffers[jss::result].isMember(jss::offers)) + return buyOffers[jss::result][jss::offers].size(); + + return 0; + }; + + // Show there are two buy offers for nftId. + BEAST_EXPECT(nftBuyOfferCount(nftId) == 2); + + // issuer accepts buyer1's offer. + env(token::acceptBuyOffer(issuer, buyer1OfferIndex)); + env.close(); + + // Prove that buyer1 now owns nftId. + BEAST_EXPECT(nftCount(env, issuer) == 0); + BEAST_EXPECT(nftCount(env, buyer1) == 1); + BEAST_EXPECT(nftCount(env, buyer2) == 0); + + // buyer1's offer was consumed, but buyer2's offer is still in the + // ledger. + BEAST_EXPECT(nftBuyOfferCount(nftId) == 1); + + // buyer1 can now accept buyer2's offer, even though buyer2's + // NFTokenCreateOffer transaction specified the NFT Owner as issuer. + env(token::acceptBuyOffer(buyer1, buyer2OfferIndex)); + env.close(); + + // Prove that buyer2 now owns nftId. + BEAST_EXPECT(nftCount(env, issuer) == 0); + BEAST_EXPECT(nftCount(env, buyer1) == 0); + BEAST_EXPECT(nftCount(env, buyer2) == 1); + + // All of the NFTokenOffers are now consumed. + BEAST_EXPECT(nftBuyOfferCount(nftId) == 0); + } + void testNFTokenWithTickets(FeatureBitset features) { @@ -4248,6 +4328,235 @@ class NFToken_test : public beast::unit_test::suite env.close(); } + void + testNftXxxOffers(FeatureBitset features) + { + testcase("nft_buy_offers and nft_sell_offers"); + + // The default limit on returned NFToken offers is 250, so we need + // to produce more than 250 offers of each kind in order to exercise + // the marker. + + // Fortunately there's nothing in the rules that says an account + // can't hold more than one offer for the same NFT. So we only + // need two accounts to generate the necessary offers. + using namespace test::jtx; + + Env env{*this, features}; + + Account const issuer{"issuer"}; + Account const buyer{"buyer"}; + + // A lot of offers requires a lot for reserve. + env.fund(XRP(1000000), issuer, buyer); + env.close(); + + // Create an NFT that we'll make offers for. + uint256 const nftID{token::getNextID(env, issuer, 0u, tfTransferable)}; + env(token::mint(issuer, 0), txflags(tfTransferable)); + env.close(); + + // A lambda that validates nft_XXX_offers query responses. + auto checkOffers = [this, &env, &nftID]( + char const* request, + int expectCount, + int expectMarkerCount, + int line) { + int markerCount = 0; + Json::Value allOffers(Json::arrayValue); + std::string marker; + + // The do/while collects results until no marker is returned. 
+ do + { + Json::Value nftOffers = [&env, &nftID, &request, &marker]() { + Json::Value params; + params[jss::nft_id] = to_string(nftID); + + if (!marker.empty()) + params[jss::marker] = marker; + return env.rpc("json", request, to_string(params)); + }(); + + // If there are no offers for the NFT we get an error + if (expectCount == 0) + { + if (expect( + nftOffers.isMember(jss::result), + "expected \"result\"", + __FILE__, + line)) + { + if (expect( + nftOffers[jss::result].isMember(jss::error), + "expected \"error\"", + __FILE__, + line)) + { + expect( + nftOffers[jss::result][jss::error].asString() == + "objectNotFound", + "expected \"objectNotFound\"", + __FILE__, + line); + } + } + break; + } + + marker.clear(); + if (expect( + nftOffers.isMember(jss::result), + "expected \"result\"", + __FILE__, + line)) + { + Json::Value& result = nftOffers[jss::result]; + + if (result.isMember(jss::marker)) + { + ++markerCount; + marker = result[jss::marker].asString(); + } + + if (expect( + result.isMember(jss::offers), + "expected \"offers\"", + __FILE__, + line)) + { + Json::Value& someOffers = result[jss::offers]; + for (std::size_t i = 0; i < someOffers.size(); ++i) + allOffers.append(someOffers[i]); + } + } + } while (!marker.empty()); + + // Verify the contents of allOffers makes sense. + expect( + allOffers.size() == expectCount, + "Unexpected returned offer count", + __FILE__, + line); + expect( + markerCount == expectMarkerCount, + "Unexpected marker count", + __FILE__, + line); + std::optional globalFlags; + std::set offerIndexes; + std::set amounts; + for (Json::Value const& offer : allOffers) + { + // The flags on all found offers should be the same. + if (!globalFlags) + globalFlags = offer[jss::flags].asInt(); + + expect( + *globalFlags == offer[jss::flags].asInt(), + "Inconsistent flags returned", + __FILE__, + line); + + // The test conditions should produce unique indexes and + // amounts for all offers. + offerIndexes.insert(offer[jss::nft_offer_index].asString()); + amounts.insert(offer[jss::amount].asString()); + } + + expect( + offerIndexes.size() == expectCount, + "Duplicate indexes returned?", + __FILE__, + line); + expect( + amounts.size() == expectCount, + "Duplicate amounts returned?", + __FILE__, + line); + }; + + // There are no sell offers. + checkOffers("nft_sell_offers", 0, false, __LINE__); + + // A lambda that generates sell offers. + STAmount sellPrice = XRP(0); + auto makeSellOffers = + [&env, &issuer, &nftID, &sellPrice](STAmount const& limit) { + // Save a little test time by not closing too often. + int offerCount = 0; + while (sellPrice < limit) + { + sellPrice += XRP(1); + env(token::createOffer(issuer, nftID, sellPrice), + txflags(tfSellNFToken)); + if (++offerCount % 10 == 0) + env.close(); + } + env.close(); + }; + + // There is one sell offer. + makeSellOffers(XRP(1)); + checkOffers("nft_sell_offers", 1, 0, __LINE__); + + // There are 250 sell offers. + makeSellOffers(XRP(250)); + checkOffers("nft_sell_offers", 250, 0, __LINE__); + + // There are 251 sell offers. + makeSellOffers(XRP(251)); + checkOffers("nft_sell_offers", 251, 1, __LINE__); + + // There are 500 sell offers. + makeSellOffers(XRP(500)); + checkOffers("nft_sell_offers", 500, 1, __LINE__); + + // There are 501 sell offers. + makeSellOffers(XRP(501)); + checkOffers("nft_sell_offers", 501, 2, __LINE__); + + // There are no buy offers. + checkOffers("nft_buy_offers", 0, 0, __LINE__); + + // A lambda that generates buy offers. 
+ STAmount buyPrice = XRP(0); + auto makeBuyOffers = + [&env, &buyer, &issuer, &nftID, &buyPrice](STAmount const& limit) { + // Save a little test time by not closing too often. + int offerCount = 0; + while (buyPrice < limit) + { + buyPrice += XRP(1); + env(token::createOffer(buyer, nftID, buyPrice), + token::owner(issuer)); + if (++offerCount % 10 == 0) + env.close(); + } + env.close(); + }; + + // There is one buy offer; + makeBuyOffers(XRP(1)); + checkOffers("nft_buy_offers", 1, 0, __LINE__); + + // There are 250 buy offers. + makeBuyOffers(XRP(250)); + checkOffers("nft_buy_offers", 250, 0, __LINE__); + + // There are 251 buy offers. + makeBuyOffers(XRP(251)); + checkOffers("nft_buy_offers", 251, 1, __LINE__); + + // There are 500 buy offers. + makeBuyOffers(XRP(500)); + checkOffers("nft_buy_offers", 500, 1, __LINE__); + + // There are 501 buy offers. + makeBuyOffers(XRP(501)); + checkOffers("nft_buy_offers", 501, 2, __LINE__); + } + void testWithFeats(FeatureBitset features) { @@ -4271,8 +4580,10 @@ class NFToken_test : public beast::unit_test::suite testCancelOffers(features); testCancelTooManyOffers(features); testBrokeredAccept(features); + testNFTokenOfferOwner(features); testNFTokenWithTickets(features); testNFTokenDeleteAccount(features); + testNftXxxOffers(features); } public: @@ -4280,8 +4591,11 @@ class NFToken_test : public beast::unit_test::suite run() override { using namespace test::jtx; - auto const sa = supported_amendments(); - testWithFeats(sa); + FeatureBitset const all{supported_amendments()}; + FeatureBitset const fixNFTDir{fixNFTokenDirV1}; + + testWithFeats(all - fixNFTDir); + testWithFeats(all); } }; diff --git a/src/test/app/Path_test.cpp b/src/test/app/Path_test.cpp index b83b0d00deb..05d23e82976 100644 --- a/src/test/app/Path_test.cpp +++ b/src/test/app/Path_test.cpp @@ -1378,6 +1378,69 @@ class Path_test : public beast::unit_test::suite } } + void + noripple_combinations() + { + using namespace jtx; + // This test will create trust lines with various values of the noRipple + // flag. alice <-> george <-> bob george will sort of act like a + // gateway, but use a different name to avoid the usual assumptions + // about gateways. + auto const alice = Account("alice"); + auto const bob = Account("bob"); + auto const george = Account("george"); + auto const USD = george["USD"]; + auto test = [&](std::string casename, + bool aliceRipple, + bool bobRipple, + bool expectPath) { + testcase(casename); + + Env env = pathTestEnv(); + env.fund(XRP(10000), noripple(alice, bob, george)); + env.close(); + // Set the same flags at both ends of the trustline, even though + // only george's matter. + env(trust( + alice, + USD(100), + aliceRipple ? tfClearNoRipple : tfSetNoRipple)); + env(trust( + george, + alice["USD"](100), + aliceRipple ? tfClearNoRipple : tfSetNoRipple)); + env(trust( + bob, USD(100), bobRipple ? tfClearNoRipple : tfSetNoRipple)); + env(trust( + george, + bob["USD"](100), + bobRipple ? 
tfClearNoRipple : tfSetNoRipple)); + env.close(); + env(pay(george, alice, USD(70))); + env.close(); + + auto [st, sa, da] = + find_paths(env, "alice", "bob", Account("bob")["USD"](5)); + BEAST_EXPECT(equal(da, bob["USD"](5))); + + if (expectPath) + { + BEAST_EXPECT(st.size() == 1); + BEAST_EXPECT(same(st, stpath("george"))); + BEAST_EXPECT(equal(sa, alice["USD"](5))); + } + else + { + BEAST_EXPECT(st.size() == 0); + BEAST_EXPECT(equal(sa, XRP(0))); + } + }; + test("ripple -> ripple", true, true, true); + test("ripple -> no ripple", true, false, true); + test("no ripple -> ripple", false, true, true); + test("no ripple -> no ripple", false, false, false); + } + void run() override { @@ -1401,6 +1464,7 @@ class Path_test : public beast::unit_test::suite trust_auto_clear_trust_auto_clear(); xrp_to_xrp(); receive_max(); + noripple_combinations(); // The following path_find_NN tests are data driven tests // that were originally implemented in js/coffee and migrated diff --git a/src/test/app/SHAMapStore_test.cpp b/src/test/app/SHAMapStore_test.cpp index fd21983a48b..010c83a4210 100644 --- a/src/test/app/SHAMapStore_test.cpp +++ b/src/test/app/SHAMapStore_test.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include @@ -65,7 +65,7 @@ class SHAMapStore_test : public beast::unit_test::suite auto const seq = json[jss::result][jss::ledger_index].asUInt(); std::optional oinfo = - env.app().getRelationalDBInterface().getLedgerInfoByIndex(seq); + env.app().getRelationalDatabase().getLedgerInfoByIndex(seq); if (!oinfo) return false; const LedgerInfo& info = oinfo.value(); @@ -120,8 +120,7 @@ class SHAMapStore_test : public beast::unit_test::suite ledgerCheck(jtx::Env& env, int const rows, int const first) { const auto [actualRows, actualFirst, actualLast] = - dynamic_cast( - &env.app().getRelationalDBInterface()) + dynamic_cast(&env.app().getRelationalDatabase()) ->getLedgerCountMinMax(); BEAST_EXPECT(actualRows == rows); @@ -133,8 +132,7 @@ class SHAMapStore_test : public beast::unit_test::suite transactionCheck(jtx::Env& env, int const rows) { BEAST_EXPECT( - dynamic_cast( - &env.app().getRelationalDBInterface()) + dynamic_cast(&env.app().getRelationalDatabase()) ->getTransactionCount() == rows); } @@ -142,8 +140,7 @@ class SHAMapStore_test : public beast::unit_test::suite accountTransactionCheck(jtx::Env& env, int const rows) { BEAST_EXPECT( - dynamic_cast( - &env.app().getRelationalDBInterface()) + dynamic_cast(&env.app().getRelationalDatabase()) ->getAccountTransactionCount() == rows); } diff --git a/src/test/jtx/impl/multisign.cpp b/src/test/jtx/impl/multisign.cpp index 129f3070145..1e1f5141798 100644 --- a/src/test/jtx/impl/multisign.cpp +++ b/src/test/jtx/impl/multisign.cpp @@ -22,6 +22,8 @@ #include #include #include +#include +#include #include #include @@ -46,6 +48,8 @@ signers( auto& je = ja[i][sfSignerEntry.getJsonName()]; je[jss::Account] = e.account.human(); je[sfSignerWeight.getJsonName()] = e.weight; + if (e.tag) + je[sfWalletLocator.getJsonName()] = to_string(*e.tag); } return jv; } diff --git a/src/test/jtx/multisign.h b/src/test/jtx/multisign.h index 91a110352cd..ab9996e415d 100644 --- a/src/test/jtx/multisign.h +++ b/src/test/jtx/multisign.h @@ -21,6 +21,7 @@ #define RIPPLE_TEST_JTX_MULTISIGN_H_INCLUDED #include +#include #include #include #include @@ -35,9 +36,13 @@ struct signer { std::uint32_t weight; Account account; + std::optional tag; - signer(Account account_, std::uint32_t weight_ = 1) - : weight(weight_), account(std::move(account_)) + signer( 
+ Account account_, + std::uint32_t weight_ = 1, + std::optional tag_ = std::nullopt) + : weight(weight_), account(std::move(account_)), tag(std::move(tag_)) { } }; diff --git a/src/test/net/DatabaseDownloader_test.cpp b/src/test/net/DatabaseDownloader_test.cpp index 749000dc50a..d4ed2ebcedf 100644 --- a/src/test/net/DatabaseDownloader_test.cpp +++ b/src/test/net/DatabaseDownloader_test.cpp @@ -19,7 +19,6 @@ #include #include -#include #include #include #include @@ -29,6 +28,8 @@ namespace ripple { namespace test { +#define REPORT_FAILURE(D) reportFailure(D, __FILE__, __LINE__) + class DatabaseDownloader_test : public beast::unit_test::suite { std::shared_ptr @@ -65,13 +66,10 @@ class DatabaseDownloader_test : public beast::unit_test::suite waitComplete() { std::unique_lock lk(m); - using namespace std::chrono_literals; -#if BOOST_OS_WINDOWS - auto constexpr timeout = 4s; -#else - auto constexpr timeout = 2s; -#endif - auto stat = cv.wait_for(lk, timeout, [this] { return called; }); + + auto stat = cv.wait_for( + lk, std::chrono::seconds(10), [this] { return called; }); + called = false; return stat; }; @@ -103,8 +101,29 @@ class DatabaseDownloader_test : public beast::unit_test::suite { return ptr_.get(); } + + DatabaseDownloader const* + operator->() const + { + return ptr_.get(); + } }; + void + reportFailure(Downloader const& dl, char const* file, int line) + { + std::stringstream ss; + ss << "Failed. LOGS:\n" + << dl.sink_.messages().str() + << "\nDownloadCompleter failure." + "\nDatabaseDownloader session active? " + << std::boolalpha << dl->sessionIsActive() + << "\nDatabaseDownloader is stopping? " << std::boolalpha + << dl->isStopping(); + + fail(ss.str(), file, line); + } + void testDownload(bool verify) { @@ -122,7 +141,7 @@ class DatabaseDownloader_test : public beast::unit_test::suite return cfg; })}; - Downloader downloader{env}; + Downloader dl{env}; // create a TrustedPublisherServer as a simple HTTP // server to request from. Use the /textfile endpoint @@ -133,7 +152,7 @@ class DatabaseDownloader_test : public beast::unit_test::suite *this, "downloads", "data", "", false, false}; // initiate the download and wait for the callback // to be invoked - auto stat = downloader->download( + auto stat = dl->download( server->local_endpoint().address().to_string(), std::to_string(server->local_endpoint().port()), "/textfile", @@ -142,12 +161,12 @@ class DatabaseDownloader_test : public beast::unit_test::suite std::function{std::ref(cb)}); if (!BEAST_EXPECT(stat)) { - log << "Failed. LOGS:\n" + downloader.sink_.messages().str(); + REPORT_FAILURE(dl); return; } if (!BEAST_EXPECT(cb.waitComplete())) { - log << "Failed. 
LOGS:\n" + downloader.sink_.messages().str(); + REPORT_FAILURE(dl); return; } BEAST_EXPECT(cb.dest == data.file()); @@ -187,7 +206,10 @@ class DatabaseDownloader_test : public beast::unit_test::suite datafile.file(), std::function{ std::ref(cb)})); - BEAST_EXPECT(cb.waitComplete()); + if (!BEAST_EXPECT(cb.waitComplete())) + { + REPORT_FAILURE(dl); + } BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); BEAST_EXPECTS( dl.sink_.messages().str().find("async_resolve") != @@ -211,7 +233,10 @@ class DatabaseDownloader_test : public beast::unit_test::suite 11, datafile.file(), std::function{std::ref(cb)})); - BEAST_EXPECT(cb.waitComplete()); + if (!BEAST_EXPECT(cb.waitComplete())) + { + REPORT_FAILURE(dl); + } BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); BEAST_EXPECTS( dl.sink_.messages().str().find("async_connect") != @@ -231,7 +256,10 @@ class DatabaseDownloader_test : public beast::unit_test::suite 11, datafile.file(), std::function{std::ref(cb)})); - BEAST_EXPECT(cb.waitComplete()); + if (!BEAST_EXPECT(cb.waitComplete())) + { + REPORT_FAILURE(dl); + } BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); BEAST_EXPECTS( dl.sink_.messages().str().find("async_handshake") != @@ -251,7 +279,10 @@ class DatabaseDownloader_test : public beast::unit_test::suite 11, datafile.file(), std::function{std::ref(cb)})); - BEAST_EXPECT(cb.waitComplete()); + if (!BEAST_EXPECT(cb.waitComplete())) + { + REPORT_FAILURE(dl); + } BEAST_EXPECT(!boost::filesystem::exists(datafile.file())); BEAST_EXPECTS( dl.sink_.messages().str().find("Insufficient disk space") != @@ -270,6 +301,8 @@ class DatabaseDownloader_test : public beast::unit_test::suite } }; +#undef REPORT_FAILURE + BEAST_DEFINE_TESTSUITE(DatabaseDownloader, net, ripple); } // namespace test } // namespace ripple diff --git a/src/test/nodestore/DatabaseShard_test.cpp b/src/test/nodestore/DatabaseShard_test.cpp index 88bcd49771a..5c074b14938 100644 --- a/src/test/nodestore/DatabaseShard_test.cpp +++ b/src/test/nodestore/DatabaseShard_test.cpp @@ -21,7 +21,7 @@ #include #include #include -#include +#include #include #include #include @@ -1762,9 +1762,9 @@ class DatabaseShard_test : public TestBase } void - testRelationalDBInterfaceSqlite(std::uint64_t const seedValue) + testSQLiteDatabase(std::uint64_t const seedValue) { - testcase("Relational DB Interface SQLite"); + testcase("SQLite Database"); using namespace test::jtx; @@ -1782,8 +1782,8 @@ class DatabaseShard_test : public TestBase BEAST_EXPECT(shardStore->getShardInfo()->finalized().empty()); BEAST_EXPECT(shardStore->getShardInfo()->incompleteToString().empty()); - auto rdb = dynamic_cast( - &env.app().getRelationalDBInterface()); + auto rdb = + dynamic_cast(&env.app().getRelationalDatabase()); BEAST_EXPECT(rdb); @@ -1796,7 +1796,7 @@ class DatabaseShard_test : public TestBase return; } - // Close these databases to force the RelationalDBInterfaceSqlite + // Close these databases to force the SQLiteDatabase // to use the shard databases and lookup tables. 
rdb->closeLedgerDB(); rdb->closeTransactionDB(); @@ -1814,7 +1814,7 @@ class DatabaseShard_test : public TestBase for (auto const& ledger : data.ledgers_) { // Compare each test ledger to the data retrieved - // from the RelationalDBInterfaceSqlite class + // from the SQLiteDatabase class if (shardStore->seqToShardIndex(ledger->seq()) < shardStore->earliestShardIndex() || @@ -1829,8 +1829,7 @@ class DatabaseShard_test : public TestBase for (auto const& transaction : ledger->txs) { // Compare each test transaction to the data - // retrieved from the RelationalDBInterfaceSqlite - // class + // retrieved from the SQLiteDatabase class error_code_i error{rpcSUCCESS}; @@ -1885,7 +1884,7 @@ class DatabaseShard_test : public TestBase testPrepareWithHistoricalPaths(seedValue()); testOpenShardManagement(seedValue()); testShardInfo(seedValue()); - testRelationalDBInterfaceSqlite(seedValue()); + testSQLiteDatabase(seedValue()); } }; diff --git a/src/test/protocol/STTx_test.cpp b/src/test/protocol/STTx_test.cpp index 5d24efcfcd4..4ef30fb7a74 100644 --- a/src/test/protocol/STTx_test.cpp +++ b/src/test/protocol/STTx_test.cpp @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -27,7 +28,6 @@ #include #include #include - #include #include @@ -1591,8 +1591,10 @@ class STTx_test : public beast::unit_test::suite }); j.sign(keypair.first, keypair.second); + Rules defaultRules{{}}; + unexpected( - !j.checkSign(STTx::RequireFullyCanonicalSig::yes), + !j.checkSign(STTx::RequireFullyCanonicalSig::yes, defaultRules), "Transaction fails signature test"); Serializer rawTxn; diff --git a/src/test/rpc/AmendmentBlocked_test.cpp b/src/test/rpc/AmendmentBlocked_test.cpp index 54c87292229..a90bcdcd0c4 100644 --- a/src/test/rpc/AmendmentBlocked_test.cpp +++ b/src/test/rpc/AmendmentBlocked_test.cpp @@ -43,6 +43,12 @@ class AmendmentBlocked_test : public beast::unit_test::suite Account const ali{"ali", KeyType::secp256k1}; env.fund(XRP(10000), alice, bob, gw); env.memoize(ali); + // This close() ensures that all the accounts get created and their + // default ripple flag gets set before the trust lines are created. + // Without it, the ordering manages to create alice's trust line with + // noRipple set on gw's end. The existing tests pass either way, but + // better to do it right. 
+ env.close(); env.trust(USD(600), alice); env.trust(USD(700), bob); env(pay(gw, alice, USD(70))); diff --git a/src/test/rpc/ShardArchiveHandler_test.cpp b/src/test/rpc/ShardArchiveHandler_test.cpp index 296699b3c7c..37c9d016843 100644 --- a/src/test/rpc/ShardArchiveHandler_test.cpp +++ b/src/test/rpc/ShardArchiveHandler_test.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include #include #include diff --git a/src/test/rpc/Transaction_test.cpp b/src/test/rpc/Transaction_test.cpp index a20a20aa617..08e97c1c20a 100644 --- a/src/test/rpc/Transaction_test.cpp +++ b/src/test/rpc/Transaction_test.cpp @@ -17,7 +17,7 @@ */ //============================================================================== -#include +#include #include #include #include @@ -119,8 +119,7 @@ class Transaction_test : public beast::unit_test::suite const auto deletedLedger = (startLegSeq + endLegSeq) / 2; { // Remove one of the ledgers from the database directly - dynamic_cast( - &env.app().getRelationalDBInterface()) + dynamic_cast(&env.app().getRelationalDatabase()) ->deleteTransactionByLedgerSeq(deletedLedger); } diff --git a/src/test/rpc/Tx_test.cpp b/src/test/rpc/Tx_test.cpp index e4e0507b996..8cfd36c5d86 100644 --- a/src/test/rpc/Tx_test.cpp +++ b/src/test/rpc/Tx_test.cpp @@ -19,7 +19,7 @@ #include #include -#include +#include #include #include #include @@ -769,8 +769,7 @@ class Tx_test : public beast::unit_test::suite const auto deletedLedger = (startLegSeq + endLegSeq) / 2; { // Remove one of the ledgers from the database directly - dynamic_cast( - &env.app().getRelationalDBInterface()) + dynamic_cast(&env.app().getRelationalDatabase()) ->deleteTransactionByLedgerSeq(deletedLedger); }
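The tests above reach the SQLite-specific API by downcasting the generic RelationalDatabase reference. Below is a minimal sketch of that pattern with the template argument written out and a null check for nodes configured with a different backend; the helper name getSQLiteBackend is hypothetical, and the include paths are assumptions based on the file names in this patch.

#include <ripple/app/main/Application.h>
#include <ripple/app/rdb/backend/SQLiteDatabase.h>

// Returns the SQLite backend, or nullptr when another relational
// database (e.g. Postgres) is configured.
static ripple::SQLiteDatabase*
getSQLiteBackend(ripple::Application& app)
{
    return dynamic_cast<ripple::SQLiteDatabase*>(&app.getRelationalDatabase());
}

// Possible use in a test body:
//   if (auto* rdb = getSQLiteBackend(env.app()))
//       rdb->deleteTransactionByLedgerSeq(deletedLedger);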